-rw-r--r--  firmware/drivers/ata.c    44
-rw-r--r--  firmware/export/kernel.h  10
-rw-r--r--  firmware/export/thread.h  14
-rw-r--r--  firmware/kernel.c         79
-rw-r--r--  firmware/thread.c         43
-rw-r--r--  uisimulator/sdl/kernel.c  12
6 files changed, 134 insertions, 68 deletions
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index 76c0090a12..d2c80e0ae0 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -104,7 +104,7 @@ STATICIRAM int wait_for_bsy(void)
     long timeout = current_tick + HZ*30;
     while (TIME_BEFORE(current_tick, timeout) && (ATA_STATUS & STATUS_BSY)) {
         last_disk_activity = current_tick;
-        yield();
+        priority_yield();
     }
 
     if (TIME_BEFORE(current_tick, timeout))
@@ -126,7 +126,7 @@ STATICIRAM int wait_for_rdy(void)
     while (TIME_BEFORE(current_tick, timeout) &&
            !(ATA_ALT_STATUS & STATUS_RDY)) {
         last_disk_activity = current_tick;
-        yield();
+        priority_yield();
     }
 
     if (TIME_BEFORE(current_tick, timeout))
@@ -216,7 +216,7 @@ int ata_read_sectors(IF_MV2(int drive,)
 #ifdef HAVE_MULTIVOLUME
     (void)drive; /* unused for now */
 #endif
-    mutex_lock(&ata_mtx);
+    spinlock_lock(&ata_mtx);
 
     last_disk_activity = current_tick;
     spinup_start = current_tick;
@@ -227,14 +227,14 @@ int ata_read_sectors(IF_MV2(int drive,)
         spinup = true;
         if (poweroff) {
             if (ata_power_on()) {
-                mutex_unlock(&ata_mtx);
+                spinlock_unlock(&ata_mtx);
                 ata_led(false);
                 return -1;
             }
         }
         else {
             if (perform_soft_reset()) {
-                mutex_unlock(&ata_mtx);
+                spinlock_unlock(&ata_mtx);
                 ata_led(false);
                 return -1;
             }
@@ -246,7 +246,7 @@ int ata_read_sectors(IF_MV2(int drive,)
     SET_REG(ATA_SELECT, ata_device);
     if (!wait_for_rdy())
     {
-        mutex_unlock(&ata_mtx);
+        spinlock_unlock(&ata_mtx);
         ata_led(false);
         return -2;
     }
@@ -359,7 +359,7 @@ int ata_read_sectors(IF_MV2(int drive,)
     }
     ata_led(false);
 
-    mutex_unlock(&ata_mtx);
+    spinlock_unlock(&ata_mtx);
 
     return ret;
 }
@@ -417,7 +417,7 @@ int ata_write_sectors(IF_MV2(int drive,)
     if (start == 0)
         panicf("Writing on sector 0\n");
 
-    mutex_lock(&ata_mtx);
+    spinlock_lock(&ata_mtx);
 
     last_disk_activity = current_tick;
     spinup_start = current_tick;
@@ -428,14 +428,14 @@ int ata_write_sectors(IF_MV2(int drive,)
         spinup = true;
         if (poweroff) {
             if (ata_power_on()) {
-                mutex_unlock(&ata_mtx);
+                spinlock_unlock(&ata_mtx);
                 ata_led(false);
                 return -1;
             }
         }
         else {
             if (perform_soft_reset()) {
-                mutex_unlock(&ata_mtx);
+                spinlock_unlock(&ata_mtx);
                 ata_led(false);
                 return -1;
             }
@@ -445,7 +445,7 @@ int ata_write_sectors(IF_MV2(int drive,)
     SET_REG(ATA_SELECT, ata_device);
     if (!wait_for_rdy())
     {
-        mutex_unlock(&ata_mtx);
+        spinlock_unlock(&ata_mtx);
         ata_led(false);
         return -2;
     }
@@ -507,7 +507,7 @@ int ata_write_sectors(IF_MV2(int drive,)
 
     ata_led(false);
 
-    mutex_unlock(&ata_mtx);
+    spinlock_unlock(&ata_mtx);
 
     return ret;
 }
@@ -572,13 +572,13 @@ static int ata_perform_sleep(void)
 {
     int ret = 0;
 
-    mutex_lock(&ata_mtx);
+    spinlock_lock(&ata_mtx);
 
     SET_REG(ATA_SELECT, ata_device);
 
     if(!wait_for_rdy()) {
         DEBUGF("ata_perform_sleep() - not RDY\n");
-        mutex_unlock(&ata_mtx);
+        spinlock_unlock(&ata_mtx);
         return -1;
     }
 
@@ -591,7 +591,7 @@ static int ata_perform_sleep(void)
     }
 
     sleeping = true;
-    mutex_unlock(&ata_mtx);
+    spinlock_unlock(&ata_mtx);
     return ret;
 }
 
@@ -649,9 +649,9 @@ static void ata_thread(void)
         if ( !spinup && sleeping && !poweroff &&
              TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
         {
-            mutex_lock(&ata_mtx);
+            spinlock_lock(&ata_mtx);
             ide_power_enable(false);
-            mutex_unlock(&ata_mtx);
+            spinlock_unlock(&ata_mtx);
             poweroff = true;
         }
 #endif
@@ -663,11 +663,11 @@ static void ata_thread(void)
 #ifndef USB_NONE
             case SYS_USB_CONNECTED:
                 if (poweroff) {
-                    mutex_lock(&ata_mtx);
+                    spinlock_lock(&ata_mtx);
                     ata_led(true);
                     ata_power_on();
                     ata_led(false);
-                    mutex_unlock(&ata_mtx);
+                    spinlock_unlock(&ata_mtx);
                 }
 
                 /* Tell the USB thread that we are safe */
@@ -741,11 +741,11 @@ int ata_soft_reset(void)
 {
     int ret;
 
-    mutex_lock(&ata_mtx);
+    spinlock_lock(&ata_mtx);
 
     ret = perform_soft_reset();
 
-    mutex_unlock(&ata_mtx);
+    spinlock_unlock(&ata_mtx);
     return ret;
 }
 
@@ -936,7 +936,7 @@ int ata_init(void)
     bool coldstart = ata_is_coldstart();
     /* must be called before ata_device_init() */
 
-    mutex_init(&ata_mtx);
+    spinlock_init(&ata_mtx);
 
     ata_led(false);
     ata_device_init();
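
Every mutex_lock/mutex_unlock pair in ata.c becomes a spinlock_lock/spinlock_unlock pair, and the unlock-on-every-exit discipline carries over unchanged: each early return inside a guarded section must drop the lock first. A condensed sketch of the pattern used throughout ata_read_sectors() and ata_write_sectors() (read_sectors_skeleton is an illustrative name, not a function in this tree):

static int read_sectors_skeleton(void)
{
    spinlock_lock(&ata_mtx);

    if (!wait_for_rdy())
    {
        spinlock_unlock(&ata_mtx); /* drop the lock on the error path */
        ata_led(false);
        return -2;
    }

    /* ... perform the transfer ... */

    spinlock_unlock(&ata_mtx);     /* and again on the success path */
    return 0;
}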
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index ec8aa28a08..495e558175 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -83,10 +83,14 @@ struct event_queue
 #endif
 };
 
+#define MTX_UNOWNED        0x00000000
+#define MTX_BLOCKED_WAITER 0x00000001
+#define MTX_SPIN_WAITER    0x00010001
+#define MTX_SPIN_OWNER     0x00020001
 struct mutex
 {
+    uint32_t locked;
     struct thread_entry *thread;
-    bool locked;
 };
 
 /* global tick variable */
@@ -126,8 +130,12 @@ extern void queue_remove_from_head(struct event_queue *q, long id);
 extern int queue_broadcast(long id, intptr_t data);
 
 extern void mutex_init(struct mutex *m);
+static inline void spinlock_init(struct mutex *m)
+{ mutex_init(m); } /* Same thing for now */
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
+extern void spinlock_lock(struct mutex *m);
+extern void spinlock_unlock(struct mutex *m);
 extern void tick_start(unsigned int interval_in_ms);
 
 #define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
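
Both lock flavors share struct mutex, so the same object can be used either way: mutex_lock() blocks the caller on m->thread until mutex_unlock() wakes it, while spinlock_lock() busy-waits, yielding the CPU on each failed attempt - the safe choice when the holder may be running concurrently on the other core. The MTX_* constants are defined here but not yet referenced by this commit's lock functions. A minimal usage sketch (the example_* names are illustrative):

static struct mutex example_lock;

void example_setup(void)
{
    spinlock_init(&example_lock);  /* currently identical to mutex_init() */
}

void example_critical_section(void)
{
    spinlock_lock(&example_lock);  /* spins until the locked word clears */
    /* ... touch state shared with the other core ... */
    spinlock_unlock(&example_lock);
}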
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 3a979722b9..7a9414c79f 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -105,6 +105,7 @@ struct thread_entry {
     unsigned short stack_size;
 #ifdef HAVE_PRIORITY_SCHEDULING
     unsigned short priority;
+    unsigned long priority_x;
     long last_run;
 #endif
     struct thread_entry *next, *prev;
@@ -114,6 +115,10 @@ struct core_entry {
     struct thread_entry threads[MAXTHREADS];
     struct thread_entry *running;
     struct thread_entry *sleeping;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    int switch_to_irq_level;
+    #define STAY_IRQ_LEVEL -1
+#endif
 };
 
 #ifdef HAVE_PRIORITY_SCHEDULING
@@ -149,7 +154,14 @@ void wakeup_thread(struct thread_entry **thread);
 #ifdef HAVE_PRIORITY_SCHEDULING
 int thread_set_priority(struct thread_entry *thread, int priority);
 int thread_get_priority(struct thread_entry *thread);
-#endif
+/* Yield that guarantees thread execution once per round regardless of
+   thread's scheduler priority - basically a transient realtime boost
+   without altering the scheduler's thread precedence. */
+void priority_yield(void);
+#else
+static inline void priority_yield(void)
+    { yield(); }
+#endif /* HAVE_PRIORITY_SCHEDULING */
 struct thread_entry * thread_get_current(void);
 void init_threads(void);
 int thread_stack_usage(const struct thread_entry *thread);
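
priority_yield() exists for tight hardware-polling loops: the polling thread gives up the CPU on every iteration but is still guaranteed one turn per scheduler round, so a low-priority thread spinning on a slow device cannot be starved indefinitely by higher-priority threads. The wait_for_bsy() loop in ata.c above is the canonical caller; the pattern in isolation:

/* Poll a busy flag with a timeout; priority_yield() keeps this thread
   scheduled once per round while it spins. */
long timeout = current_tick + HZ*30;
while (TIME_BEFORE(current_tick, timeout) && (ATA_STATUS & STATUS_BSY))
{
    last_disk_activity = current_tick;
    priority_yield();
}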
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 313530ffba..db7249fdee 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -656,48 +656,73 @@ void mutex_init(struct mutex *m)
     m->thread = NULL;
 }
 
-#ifdef CPU_PP
-/* PortalPlayer chips have 2 cores, therefore need atomic mutexes */
+/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
+ * Just use it for ARM, Coldfire and whatever else well...why not?
+ */
 
-static inline bool test_and_set(bool *x, bool v)
-{
-    asm volatile (
-        "swpb %0, %0, [%1]\n"
-        : "+r"(v)
-        : "r"(x)
-    );
-    return v;
-}
+/* Macros generate better code than an inline function is this case */
+#if defined (CPU_PP) || defined (CPU_ARM)
+#define test_and_set(x_, v_) \
+({ \
+    uint32_t old; \
+    asm volatile ( \
+        "swpb %[old], %[v], [%[x]] \r\n" \
+        : [old]"=r"(old) \
+        : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
+    ); \
+    old; \
+})
+#elif defined (CPU_COLDFIRE)
+#define test_and_set(x_, v_) \
+({ \
+    uint8_t old; \
+    asm volatile ( \
+        "bset.l %[v], (%[x]) \r\n" \
+        "sne.b %[old] \r\n" \
+        : [old]"=d,d"(old) \
+        : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
+    ); \
+    old; \
+})
+#else
+/* default for no asm version */
+#define test_and_set(x_, v_) \
+({ \
+    uint32_t old = *(uint32_t *)x_; \
+    *(uint32_t *)x_ = v_; \
+    old; \
+})
+#endif
 
 void mutex_lock(struct mutex *m)
 {
-    if (test_and_set(&m->locked,true))
+    if (test_and_set(&m->locked, 1))
     {
         /* Wait until the lock is open... */
         block_thread(&m->thread);
     }
 }
 
-#else
-void mutex_lock(struct mutex *m)
-{
-    if (m->locked)
-    {
-        /* Wait until the lock is open... */
-        block_thread(&m->thread);
-    }
-
-    /* ...and lock it */
-    m->locked = true;
-}
-#endif
-
 void mutex_unlock(struct mutex *m)
 {
     if (m->thread == NULL)
-        m->locked = false;
+        m->locked = 0;
     else
         wakeup_thread(&m->thread);
 }
 
-#endif
+void spinlock_lock(struct mutex *m)
+{
+    while (test_and_set(&m->locked, 1))
+    {
+        /* wait until the lock is open... */
+        switch_thread(true, NULL);
+    }
+}
+
+void spinlock_unlock(struct mutex *m)
+{
+    m->locked = 0;
+}
+
+#endif /* ndef SIMULATOR */
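
Each test_and_set() variant atomically stores the new value and evaluates to the value previously held, so acquiring a lock reduces to "a returned 0 means we got it". The GCC statement-expression form (({ ... })) is what lets a multi-statement macro yield a value. Behaviorally, every variant matches this portable model, which is also what the no-asm fallback compiles to, minus the atomicity (test_and_set_model is an illustrative name):

#include <stdint.h>

/* Reference model: write v, return the previous contents. The asm
   variants perform the same exchange in a single atomic operation. */
static uint32_t test_and_set_model(uint32_t *x, uint32_t v)
{
    uint32_t old = *x;
    *x = v;
    return old;
}

Note that ARM's swpb only exchanges the low byte of the word at [x], which is sufficient as long as the lock word only ever holds 0 or 1, as it does in this commit.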
diff --git a/firmware/thread.c b/firmware/thread.c
index 614286c422..8022d94862 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -39,11 +39,6 @@ static unsigned short highest_priority IBSS_ATTR;
 static int boosted_threads IBSS_ATTR;
 #endif
 
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-#define STAY_IRQ_LEVEL -1
-static int switch_to_irq_level = STAY_IRQ_LEVEL;
-#endif
-
 /* Define to enable additional checks for blocking violations etc. */
 #define THREAD_EXTRA_CHECKS
 
@@ -136,11 +131,11 @@ static inline void load_context(const void* addr)
         "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
         "move.l %%d0,%%macsr \n"
         "move.l (52,%0),%%d0 \n" /* Get start address */
-        "beq.b .running \n" /* NULL -> already running */
+        "beq.b 1f \n" /* NULL -> already running */
         "clr.l (52,%0) \n" /* Clear start address.. */
         "move.l %%d0,%0 \n"
         "jmp (%0) \n" /* ..and start the thread */
-    ".running: \n"
+    "1: \n"
         : : "a" (addr) : "d0" /* only! */
     );
 }
@@ -422,10 +417,10 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
     /* This has to be done after the scheduler is finished with the
        blocked_list pointer so that an IRQ can't kill us by attempting
        a wake but before attempting any core sleep. */
-    if (switch_to_irq_level != STAY_IRQ_LEVEL)
+    if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
     {
-        int level = switch_to_irq_level;
-        switch_to_irq_level = STAY_IRQ_LEVEL;
+        int level = cores[CURRENT_CORE].switch_to_irq_level;
+        cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
         set_irq_level(level);
     }
 #endif
@@ -442,13 +437,14 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
         for (;;)
         {
            int priority = cores[CURRENT_CORE].running->priority;
 
            if (priority < highest_priority)
                highest_priority = priority;
 
            if (priority == highest_priority ||
                (current_tick - cores[CURRENT_CORE].running->last_run >
-                priority * 8))
+                priority * 8) ||
+               cores[CURRENT_CORE].running->priority_x != 0)
                break;
 
            cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
@@ -567,7 +563,7 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
 #if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
 void set_irq_level_and_block_thread(struct thread_entry **list, int level)
 {
-    switch_to_irq_level = level;
+    cores[CURRENT_CORE].switch_to_irq_level = level;
     block_thread(list);
 }
 
@@ -575,7 +571,7 @@ void set_irq_level_and_block_thread(struct thread_entry **list, int level)
 void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                           int timeout, int level)
 {
-    switch_to_irq_level = level;
+    cores[CURRENT_CORE].switch_to_irq_level = level;
     block_thread_w_tmo(list, timeout);
 }
 #endif
@@ -688,6 +684,7 @@ struct thread_entry*
     thread->stack_size = stack_size;
     thread->statearg = 0;
 #ifdef HAVE_PRIORITY_SCHEDULING
+    thread->priority_x = 0;
    thread->priority = priority;
    highest_priority = 100;
 #endif
@@ -759,7 +756,7 @@ int thread_set_priority(struct thread_entry *thread, int priority)
 
     if (thread == NULL)
         thread = cores[CURRENT_CORE].running;
 
     old_priority = thread->priority;
     thread->priority = priority;
     highest_priority = 100;
@@ -774,7 +771,15 @@ int thread_get_priority(struct thread_entry *thread)
 
     return thread->priority;
 }
-#endif
+
+void priority_yield(void)
+{
+    struct thread_entry *thread = cores[CURRENT_CORE].running;
+    thread->priority_x = 1;
+    switch_thread(true, NULL);
+    thread->priority_x = 0;
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
 
 struct thread_entry * thread_get_current(void)
 {
@@ -789,10 +794,14 @@ void init_threads(void)
     memset(cores, 0, sizeof cores);
     cores[core].sleeping = NULL;
     cores[core].running = NULL;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
+#endif
     cores[core].threads[0].name = main_thread_name;
     cores[core].threads[0].statearg = 0;
 #ifdef HAVE_PRIORITY_SCHEDULING
     cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
+    cores[core].threads[0].priority_x = 0;
     highest_priority = 100;
 #endif
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
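
The scheduler hook and priority_yield() are deliberately scoped to one another: priority_x is raised for exactly the span of one switch_thread() call, so the boost cannot outlive the yield. The same function as in the hunk above, annotated:

void priority_yield(void)
{
    struct thread_entry *thread = cores[CURRENT_CORE].running;
    thread->priority_x = 1;    /* skip loop will no longer pass us over */
    switch_thread(true, NULL); /* let every other thread take its turn */
    thread->priority_x = 0;    /* boost ends the moment we run again */
}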
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
index dddfa70066..e01fbe65b9 100644
--- a/uisimulator/sdl/kernel.c
+++ b/uisimulator/sdl/kernel.c
@@ -355,3 +355,15 @@ void mutex_unlock(struct mutex *m)
 {
     m->locked = false;
 }
+
+void spinlock_lock(struct mutex *m)
+{
+    while(m->locked)
+        switch_thread(true, NULL);
+    m->locked = true;
+}
+
+void spinlock_unlock(struct mutex *m)
+{
+    m->locked = false;
+}
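
The simulator variant skips test_and_set() entirely: a plain check-then-set is presumably sufficient there because simulator threads only switch inside switch_thread(), so the sequence cannot be preempted between the test and the store.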