author    Michael Sevakis <jethead71@rockbox.org>  2008-01-19 13:27:47 +0000
committer Michael Sevakis <jethead71@rockbox.org>  2008-01-19 13:27:47 +0000
commit    32a531b09bf05000048bb3e1cbc25556075e2334 (patch)
tree      3f7ec134a2c2e23a9bf8d8f8755034947afdde66
parent    2235c7e582f21789f4f881380a983cae1737bb37 (diff)
download  rockbox-32a531b09bf05000048bb3e1cbc25556075e2334.tar.gz
          rockbox-32a531b09bf05000048bb3e1cbc25556075e2334.zip
Simplify interrupt enabling in switch_thread, which simplifies kernel objects as well. Doing an unconditional disable/enable should be fine in the firmware. The bootloader case hasn't been verified for all targets and will be evaluated, but it should be fine too as long as everything is masked.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16107 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  firmware/export/thread.h     |   7
-rw-r--r--  firmware/kernel.c            |  21
-rw-r--r--  firmware/thread.c            | 218
-rw-r--r--  uisimulator/sdl/kernel.c     |   3
-rw-r--r--  uisimulator/sdl/thread-sdl.c |  13
5 files changed, 75 insertions, 187 deletions
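
In outline, the commit removes the per-caller IRQ-level plumbing (TBOP_IRQ_LEVEL, blk_ops.irq_level, and the single-core cores[].irq_level / STAY_IRQ_LEVEL pair) in favor of a fixed contract: sleep_core()/core_idle() mask interrupts unconditionally, and core_sleep() or the return to a runnable thread re-enables them unconditionally. A minimal compilable model of the two schemes (set_irq_level() here is a stub with the same return-previous-level contract as the Rockbox call; the switch_thread_* functions are illustrative, not kernel code):

/* Toy model of the before/after IRQ handoff -- not the Rockbox kernel. */
#include <stdio.h>

#define HIGHEST_IRQ_LEVEL 1

static int irq_level = 0;               /* 0 = enabled, 1 = masked */

static int set_irq_level(int level)     /* returns the previous level */
{
    int old = irq_level;
    irq_level = level;
    printf("irq level %d -> %d\n", old, level);
    return old;
}

/* Before: the blocker saved its level and the switch path restored it
 * conditionally (the TBOP_IRQ_LEVEL op removed by this commit). */
static void switch_thread_before(int saved_level)
{
    /* ... pick next thread, run blocking ops ... */
    set_irq_level(saved_level);         /* restore caller's level */
}

/* After: no level travels with the request. The scheduler masks
 * unconditionally; whatever resumes execution enables unconditionally. */
static void switch_thread_after(void)
{
    set_irq_level(HIGHEST_IRQ_LEVEL);   /* sleep_core() entry */
    /* ... core_sleep() re-enables while halted, or: ... */
    set_irq_level(0);                   /* thread runs with IRQs on */
}

int main(void)
{
    int old = set_irq_level(HIGHEST_IRQ_LEVEL);  /* e.g. queue_wait() */
    switch_thread_before(old);
    switch_thread_after();
    return 0;
}

The point of the after-scheme is that no saved level ever travels with a blocking request, so every blocking path below loses a field and a conditional restore.
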
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 0b1500cd99..dd97ab1e83 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -279,12 +279,10 @@ struct thread_entry
 #define TBOP_UNLOCK_CORELOCK 0x04
 #define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
 #define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
-#define TBOP_IRQ_LEVEL       0x20 /* Set a new irq level */
-#define TBOP_SWITCH_CORE     0x40 /* Call the core switch preparation routine */
+#define TBOP_SWITCH_CORE     0x20 /* Call the core switch preparation routine */
 
 struct thread_blk_ops
 {
-    int irq_level; /* new IRQ level to set */
 #if CONFIG_CORELOCK != SW_CORELOCK
     union
     {
@@ -330,9 +328,6 @@ struct core_entry
 #if NUM_CORES > 1
     struct thread_blk_ops blk_ops; /* operations to perform when
                                       blocking a thread */
-#else
-    #define STAY_IRQ_LEVEL (-1)
-    int irq_level; /* sets the irq level to irq_level */
 #endif /* NUM_CORES */
 #ifdef HAVE_PRIORITY_SCHEDULING
     unsigned char highest_priority;
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 204f3e8141..35bdec7dfc 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -298,16 +298,13 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
     do
     {
 #if CONFIG_CORELOCK == CORELOCK_NONE
-        cores[CURRENT_CORE].irq_level = oldlevel;
 #elif CONFIG_CORELOCK == SW_CORELOCK
         const unsigned int core = CURRENT_CORE;
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
         cores[core].blk_ops.cl_p = &q->cl;
 #elif CONFIG_CORELOCK == CORELOCK_SWAP
         const unsigned int core = CURRENT_CORE;
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_SET_VARu8;
         cores[core].blk_ops.var_u8p = &q->cl.locked;
         cores[core].blk_ops.var_u8v = 0;
 #endif /* CONFIG_CORELOCK */
@@ -352,16 +349,13 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     if (q->read == q->write && ticks > 0)
     {
 #if CONFIG_CORELOCK == CORELOCK_NONE
-        cores[CURRENT_CORE].irq_level = oldlevel;
 #elif CONFIG_CORELOCK == SW_CORELOCK
         const unsigned int core = CURRENT_CORE;
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
         cores[core].blk_ops.cl_p = &q->cl;
 #elif CONFIG_CORELOCK == CORELOCK_SWAP
         const unsigned int core = CURRENT_CORE;
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_SET_VARu8;
         cores[core].blk_ops.var_u8p = &q->cl.locked;
         cores[core].blk_ops.var_u8v = 0;
 #endif
@@ -458,14 +452,11 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         wakeup_thread(&q->queue);
 
 #if CONFIG_CORELOCK == CORELOCK_NONE
-        cores[core].irq_level = oldlevel;
 #elif CONFIG_CORELOCK == SW_CORELOCK
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
         cores[core].blk_ops.cl_p = &q->cl;
 #elif CONFIG_CORELOCK == CORELOCK_SWAP
-        cores[core].blk_ops.irq_level = oldlevel;
-        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+        cores[core].blk_ops.flags = TBOP_SET_VARu8;
         cores[core].blk_ops.var_u8p = &q->cl.locked;
         cores[core].blk_ops.var_u8v = 0;
 #endif
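
For orientation: blk_ops exists because a blocking thread cannot release the queue's corelock until it has fully switched out, so it records the unlock for run_blocking_ops() to perform on the switch path. After this commit the record carries only lock operations, never an IRQ level. A compilable single-core sketch of that handoff (stub struct and flow only; the real kernel runs this across cores under real locks):

#include <stdio.h>

#define TBOP_UNLOCK_CORELOCK 0x04

struct corelock { int locked; };

struct thread_blk_ops {
    unsigned flags;
    struct corelock *cl_p;
};

static struct thread_blk_ops blk_ops;

/* Runs on the switch path, after the blocker is off the CPU. */
static void run_blocking_ops(void)
{
    if (blk_ops.flags & TBOP_UNLOCK_CORELOCK)
        blk_ops.cl_p->locked = 0;      /* drop the lock for the blocker */
    blk_ops.flags = 0;
}

int main(void)
{
    struct corelock cl = { 1 };        /* taken by the queue_wait() caller */

    /* The blocker records the unlock instead of doing it in place;
     * note: no TBOP_IRQ_LEVEL or saved irq_level after this commit. */
    blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    blk_ops.cl_p  = &cl;

    run_blocking_ops();                /* called from switch_thread() */
    printf("corelock locked = %d\n", cl.locked);
    return 0;
}
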
diff --git a/firmware/thread.c b/firmware/thread.c
index 37157be245..96088bec72 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -98,23 +98,17 @@ extern int stackbegin[];
 extern int stackend[];
 
 /* core_sleep procedure to implement for any CPU to ensure an asychronous wakup
- * never results in requiring a wait until the next tick (up to 10000uS!). Likely
- * requires assembly and careful instruction ordering. Multicore requires
- * carefully timed sections in order to have synchronization without locking of
- * any sort.
+ * never results in requiring a wait until the next tick (up to 10000uS!). May
+ * require assembly and careful instruction ordering.
  *
- * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
- * 2) Check *waking == NULL.
- * 3) *waking not NULL? Goto step 7.
- * 4) On multicore, stay awake if directed to do so by another. If so, goto step 7.
- * 5) If processor requires, atomically reenable interrupts and perform step 6.
- * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
- *    goto step 8.
- * 7) Reenable interrupts.
- * 8) Exit procedure.
+ * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4.
+ * 2) If processor requires, atomically reenable interrupts and perform step 3.
+ * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
+ *    goto step 5.
+ * 4) Enable interrupts.
+ * 5) Exit procedure.
  */
-static inline void core_sleep(
-    IF_COP(unsigned int core,) struct thread_entry **waking)
+static inline void core_sleep(IF_COP_VOID(unsigned int core))
         __attribute__((always_inline));
 
 static void check_tmo_threads(void)
@@ -407,34 +401,22 @@ void corelock_unlock(struct corelock *cl)
  */
 #if NUM_CORES == 1
 /* Shared single-core build debugging version */
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
-    set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
-    if (*waking == NULL)
-    {
-        PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
-        nop; nop; nop;
-    }
+    PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
+    nop; nop; nop;
     set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
 }
 #elif defined (CPU_PP502x)
-static inline void core_sleep(unsigned int core,
-                              struct thread_entry **waking)
+static inline void core_sleep(unsigned int core)
 {
 #if 1
-    /* Disabling IRQ and FIQ is important to making the fixed-time sequence
-     * non-interruptable */
     asm volatile (
-        "mrs r2, cpsr            \n" /* Disable IRQ, FIQ */
-        "orr r2, r2, #0xc0       \n"
-        "msr cpsr_c, r2          \n"
         "mov r0, #4              \n" /* r0 = 0x4 << core */
         "mov r0, r0, lsl %[c]    \n"
         "str r0, [%[mbx], #4]    \n" /* signal intent to sleep */
-        "ldr r1, [%[waking]]     \n" /* *waking == NULL ? */
-        "cmp r1, #0              \n"
-        "ldreq r1, [%[mbx], #0]  \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
-        "tsteq r1, r0, lsl #2    \n"
+        "ldr r1, [%[mbx], #0]    \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+        "tst r1, r0, lsl #2      \n"
         "moveq r1, #0x80000000   \n" /* Then sleep */
         "streq r1, [%[ctl], %[c], lsl #2] \n"
         "moveq r1, #0            \n" /* Clear control reg */
@@ -445,21 +427,18 @@ static inline void core_sleep(unsigned int core,
         "ldr r1, [%[mbx], #0]    \n"
         "tst r1, r0, lsr #2      \n"
         "bne 1b                  \n"
-        "bic r2, r2, #0xc0       \n" /* Enable interrupts */
-        "msr cpsr_c, r2          \n"
+        "mrs r1, cpsr            \n" /* Enable interrupts */
+        "bic r1, r1, #0xc0       \n"
+        "msr cpsr_c, r1          \n"
         :
-        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
-          [waking]"r"(waking), [c]"r"(core)
-        : "r0", "r1", "r2");
+        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
+        : "r0", "r1");
 #else /* C version for reference */
-    /* Disable IRQ, FIQ */
-    set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
-
     /* Signal intent to sleep */
     MBX_MSG_SET = 0x4 << core;
 
     /* Something waking or other processor intends to wake us? */
-    if (*waking == NULL && (MBX_MSG_STAT & (0x10 << core)) == 0)
+    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
     {
         PROC_CTL(core) = PROC_SLEEP; nop; /* Snooze */
         PROC_CTL(core) = 0;               /* Clear control reg */
@@ -477,20 +456,14 @@ static inline void core_sleep(unsigned int core,
 }
 #elif CONFIG_CPU == PP5002
 /* PP5002 has no mailboxes - emulate using bytes */
-static inline void core_sleep(unsigned int core,
-                              struct thread_entry **waking)
+static inline void core_sleep(unsigned int core)
 {
 #if 1
     asm volatile (
-        "mrs r1, cpsr            \n" /* Disable IRQ, FIQ */
-        "orr r1, r1, #0xc0       \n"
-        "msr cpsr_c, r1          \n"
         "mov r0, #1              \n" /* Signal intent to sleep */
         "strb r0, [%[sem], #2]   \n"
-        "ldr r0, [%[waking]]     \n" /* *waking == NULL? */
-        "cmp r0, #0              \n"
-        "ldreqb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
-        "cmpeq r0, #0            \n"
+        "ldrb r0, [%[sem], #1]   \n" /* && stay_awake == 0? */
+        "cmp r0, #0              \n"
         "moveq r0, #0xca         \n" /* Then sleep */
         "streqb r0, [%[ctl], %[c], lsl #2] \n"
         "nop                     \n" /* nop's needed because of pipeline */
@@ -503,22 +476,20 @@ static inline void core_sleep(unsigned int core,
         "ldrb r0, [%[sem], #0]   \n"
         "cmp r0, #0              \n"
         "bne 1b                  \n"
-        "bic r1, r1, #0xc0       \n" /* Enable interrupts */
-        "msr cpsr_c, r1          \n"
+        "mrs r0, cpsr            \n" /* Enable interrupts */
+        "bic r0, r0, #0xc0       \n"
+        "msr cpsr_c, r0          \n"
         :
         : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
-          [waking]"r"(waking), [ctl]"r"(&PROC_CTL(CPU))
-        : "r0", "r1"
+          [ctl]"r"(&PROC_CTL(CPU))
+        : "r0"
         );
 #else /* C version for reference */
-    /* Disable IRQ, FIQ */
-    set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
-
     /* Signal intent to sleep */
     core_semaphores[core].intend_sleep = 1;
 
     /* Something waking or other processor intends to wake us? */
-    if (*waking == NULL && core_semaphores[core].stay_awake == 0)
+    if (core_semaphores[core].stay_awake == 0)
     {
         PROC_CTL(core) = PROC_SLEEP; /* Snooze */
         nop; nop; nop;
@@ -747,48 +718,40 @@ static void switch_thread_core(unsigned int core, struct thread_entry *thread)
  * Put core in a power-saving state if waking list wasn't repopulated.
  *---------------------------------------------------------------------------
  */
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
     /* FIQ also changes the CLKCON register so FIQ must be disabled
        when changing it here */
     asm volatile (
-        "mrs r0, cpsr            \n" /* Disable IRQ, FIQ */
-        "orr r0, r0, #0xc0       \n"
-        "msr cpsr_c, r0          \n"
-        "ldr r1, [%0]            \n" /* Check *waking */
-        "cmp r1, #0              \n"
-        "bne 2f                  \n" /* != NULL -> exit */
-        "bic r0, r0, #0xc0       \n" /* Prepare IRQ, FIQ enable */
+        "mrs r0, cpsr            \n" /* Prepare IRQ, FIQ enable */
+        "bic r0, r0, #0xc0       \n"
         "mov r1, #0x4c000000     \n" /* CLKCON = 0x4c00000c */
         "ldr r2, [r1, #0xc]      \n" /* Set IDLE bit */
         "orr r2, r2, #4          \n"
         "str r2, [r1, #0xc]      \n"
         "msr cpsr_c, r0          \n" /* Enable IRQ, FIQ */
-        "mov r3, #0              \n" /* wait for IDLE */
+        "mov r2, #0              \n" /* wait for IDLE */
         "1:                      \n"
-        "add r3, r3, #1          \n"
-        "cmp r3, #10             \n"
+        "add r2, r2, #1          \n"
+        "cmp r2, #10             \n"
         "bne 1b                  \n"
-        "orr r0, r0, #0xc0       \n" /* Disable IRQ, FIQ */
-        "msr cpsr_c, r0          \n"
+        "orr r2, r0, #0xc0       \n" /* Disable IRQ, FIQ */
+        "msr cpsr_c, r2          \n"
         "ldr r2, [r1, #0xc]      \n" /* Reset IDLE bit */
         "bic r2, r2, #4          \n"
         "str r2, [r1, #0xc]      \n"
-        "2:                      \n"
-        "bic r0, r0, #0xc0       \n" /* Enable IRQ, FIQ */
-        "msr cpsr_c, r0          \n"
-        : : "r"(waking) : "r0", "r1", "r2", "r3");
+        "msr cpsr_c, r0          \n" /* Enable IRQ, FIQ */
+        : : : "r0", "r1", "r2");
 }
 #elif defined(CPU_TCC77X)
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
     #warning TODO: Implement core_sleep
 }
 #else
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
-    (void) waking;
-#warning core_sleep not implemented, battery life will be decreased
+    #warning core_sleep not implemented, battery life will be decreased
 }
 #endif /* CONFIG_CPU == */
 
@@ -864,23 +827,10 @@ static inline void load_context(const void* addr)
  * Put core in a power-saving state if waking list wasn't repopulated.
  *---------------------------------------------------------------------------
  */
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
-    asm volatile (
-        "moveq.l %1, %%d0    \n" /* Disable interrupts (not audio DMA) */
-        "lsl.l #8, %%d0      \n"
-        "move.w %%d0, %%sr   \n"
-        "tst.l (%0)          \n" /* Check *waking */
-        "beq.b 1f            \n" /* != NULL -> exit */
-        "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
-        "lsl.l #8, %%d0      \n"
-        "move.w %%d0, %%sr   \n"
-        ".word 0x51fb        \n" /* tpf.l - eat stop instruction */
-        "1:                  \n"
-        "stop #0x2000        \n" /* Supervisor mode, interrupts enabled
-                                    upon wakeup */
-        : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
-    );
+    /* Supervisor mode, interrupts enabled upon wakeup */
+    asm volatile ("stop #0x2000");
 };
 
 #elif CONFIG_CPU == SH7034
@@ -965,26 +915,14 @@ static inline void load_context(const void* addr)
  * Put core in a power-saving state if waking list wasn't repopulated.
  *---------------------------------------------------------------------------
  */
-static inline void core_sleep(struct thread_entry **waking)
+static inline void core_sleep(void)
 {
     asm volatile (
-        "mov %2, r1              \n" /* Disable interrupts */
-        "ldc r1, sr              \n"
-        "mov.l @%1, r1           \n" /* Check *waking */
-        "tst r1, r1              \n"
-        "bf 1f                   \n" /* *waking != NULL ? exit */
         "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
         "mov #0, r1              \n" /* Enable interrupts */
         "ldc r1, sr              \n" /* Following instruction cannot be interrupted */
-        "bra 2f                  \n" /* bra and sleep are executed at once */
         "sleep                   \n" /* Execute standby */
-        "1:                      \n"
-        "mov #0, r1              \n" /* Enable interrupts */
-        "ldc r1, sr              \n"
-        "2:                      \n"
-        :
-        : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
-        : "r1");
+        : : "z"(&SBYCR-GBR) : "r1");
 }
 
 #endif /* CONFIG_CPU == */
@@ -1318,7 +1256,6 @@ static void core_schedule_wakeup(struct thread_entry *thread)
  */
 static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
 {
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
     struct thread_entry *r = cores[IF_COP_CORE(core)].running;
 
@@ -1346,7 +1283,6 @@ static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
 
     /* Waking list is clear - NULL and unlock it */
     UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
-    set_irq_level(oldlevel);
 }
 
 /*---------------------------------------------------------------------------
@@ -1367,8 +1303,6 @@ static void check_tmo_threads(void)
     if (next != NULL)
     {
         /* Check sleeping threads. */
-        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
         do
         {
             /* Must make sure noone else is examining the state, wait until
@@ -1414,8 +1348,6 @@ static void check_tmo_threads(void)
              * sleeping processes or have removed them all. */
         }
         while (next != NULL);
-
-        set_irq_level(oldlevel);
     }
 
     cores[core].next_tmo_check = next_tmo_check;
@@ -1427,10 +1359,10 @@ static void check_tmo_threads(void)
  * assumed to be nonzero.
  *---------------------------------------------------------------------------
  */
+#if NUM_CORES > 1
 static inline void run_blocking_ops(
     IF_COP_VOID(unsigned int core, struct thread_entry *thread))
 {
-#if NUM_CORES > 1
     struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
     const unsigned flags = ops->flags;
 
@@ -1479,22 +1411,9 @@ static inline void run_blocking_ops(
         UNLOCK_THREAD(thread, ops->state);
     }
 
-    /* Reset the IRQ level */
-    if (flags & TBOP_IRQ_LEVEL)
-    {
-        set_irq_level(ops->irq_level);
-    }
-
     ops->flags = 0;
-#else
-    int level = cores[CURRENT_CORE].irq_level;
-    if (level == STAY_IRQ_LEVEL)
-        return;
-
-    cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
-    set_irq_level(level);
-#endif /* NUM_CORES */
 }
+#endif /* NUM_CORES > 1 */
 
 
 /*---------------------------------------------------------------------------
@@ -1506,6 +1425,7 @@ static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
 {
     for (;;)
     {
+        set_irq_level(HIGHEST_IRQ_LEVEL);
         /* We want to do these ASAP as it may change the decision to sleep
          * the core or a core has woken because an interrupt occurred
          * and posted a message to a queue. */
@@ -1524,16 +1444,17 @@ static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
 
         /* If there is a ready to run task, return its ID and keep core
          * awake. */
-        if (cores[IF_COP_CORE(core)].running != NULL)
+        if (cores[IF_COP_CORE(core)].running == NULL)
         {
-            return cores[IF_COP_CORE(core)].running;
+            /* Enter sleep mode to reduce power usage - woken up on interrupt
+             * or wakeup request from another core - expected to enable all
+             * interrupts. */
+            core_sleep(IF_COP(core));
+            continue;
         }
 
-        /* Enter sleep mode to reduce power usage - woken up on interrupt or
-         * wakeup request from another core. May abort if the waking list
-         * became populated (again). See beginning of this file for the
-         * algorithm to atomically determine this. */
-        core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
+        set_irq_level(0);
+        return cores[IF_COP_CORE(core)].running;
     }
 }
 
@@ -1677,8 +1598,10 @@ void switch_thread(struct thread_entry *old)
     if(((unsigned int *)old->stack)[0] != DEADBEEF)
         thread_stkov(old);
 
+#if NUM_CORES > 1
     /* Run any blocking operations requested before switching/sleeping */
     run_blocking_ops(IF_COP(core, old));
+#endif
 
     /* Go through the list of sleeping task to check if we need to wake up
      * any of them due to timeout. Also puts core into sleep state until
@@ -2036,8 +1959,11 @@ static int find_empty_thread_slot(void)
  */
 void core_idle(void)
 {
+#if NUM_CORES > 1
     const unsigned int core = CURRENT_CORE;
-    core_sleep(IF_COP(core,) &cores[core].waking.queue);
+#endif
+    set_irq_level(HIGHEST_IRQ_LEVEL);
+    core_sleep(IF_COP(core));
 }
 
 /*---------------------------------------------------------------------------
@@ -2257,12 +2183,6 @@ void remove_thread(struct thread_entry *thread)
     {
         /* Suicide - thread has unconditional rights to do this */
         /* Maintain locks until switch-out */
-#if NUM_CORES > 1
-        cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
-        cores[core].blk_ops.irq_level = oldlevel;
-#else
-        cores[core].irq_level = oldlevel;
-#endif
         block_thread_on_l(NULL, thread, STATE_KILLED);
 
 #if NUM_CORES > 1
@@ -2389,10 +2309,6 @@ void thread_wait(struct thread_entry *thread)
 
     if (thread_state != STATE_KILLED)
     {
-#if NUM_CORES > 1
-        cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
-        cores[core].blk_ops.irq_level = oldlevel;
-#endif
         /* Unlock the waitee state at task switch - not done for self-wait
            because the would double-unlock the state and potentially
            corrupt another's busy assert on the slot */
@@ -2586,8 +2502,7 @@ unsigned int switch_core(unsigned int new_core)
      * that execution may resume on the new core, unlock our slot and finally
      * restore the interrupt level */
     cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
-                                TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
-    cores[core].blk_ops.irq_level = oldlevel;
+                                TBOP_UNLOCK_LIST;
     cores[core].blk_ops.list_p = &cores[new_core].waking;
 #if CONFIG_CORELOCK == CORELOCK_SWAP
     cores[core].blk_ops.state = STATE_RUNNING;
@@ -2639,9 +2554,6 @@ void init_threads(void)
     /* Initialize initially non-zero members of core */
     thread_queue_init(&cores[core].waking);
     cores[core].next_tmo_check = current_tick; /* Something not in the past */
-#if NUM_CORES == 1
-    cores[core].irq_level = STAY_IRQ_LEVEL;
-#endif
 #ifdef HAVE_PRIORITY_SCHEDULING
     cores[core].highest_priority = LOWEST_PRIORITY;
 #endif
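
Restated from the algorithm comment in the first thread.c hunk above, the new core_sleep() contract is: enter with interrupts masked by sleep_core()/core_idle(), optionally halt, and always exit with interrupts enabled. A compilable sketch with stand-in primitives (signal_intent_to_sleep, wake_requested, cpu_sleep, and enable_interrupts are illustrative stubs, not Rockbox APIs):

#include <stdbool.h>
#include <stdio.h>

static bool stay_awake = false;        /* set by another core's waker */

static void signal_intent_to_sleep(unsigned int core)
    { printf("core %u: intent to sleep\n", core); }
static bool wake_requested(unsigned int core)
    { (void)core; return stay_awake; }
static void cpu_sleep(unsigned int core)
    { printf("core %u: halted until interrupt\n", core); }
static void enable_interrupts(void)
    { printf("interrupts enabled\n"); }

/* Entered with interrupts masked by the caller;
 * must always exit with them enabled. */
static void core_sleep_sketch(unsigned int core)
{
    signal_intent_to_sleep(core);      /* e.g. MBX_MSG_SET = 0x4 << core */
    if (!wake_requested(core))         /* step 1: told to stay awake? */
        cpu_sleep(core);               /* steps 2-3: halt until interrupt */
    enable_interrupts();               /* step 4: unconditional */
}

int main(void)
{
    core_sleep_sketch(0);              /* sleeps, then enables IRQs */
    stay_awake = true;
    core_sleep_sketch(1);              /* skips the halt, still enables IRQs */
    return 0;
}

On the PP targets the real implementations additionally wait for the waker to release them (the "1b" polling loops in the hunks above) before enabling interrupts.
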
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
index 17ca55a694..d67fb2b9f1 100644
--- a/uisimulator/sdl/kernel.c
+++ b/uisimulator/sdl/kernel.c
@@ -269,7 +269,6 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
     {
         do
         {
-            cores[CURRENT_CORE].irq_level = oldlevel;
             block_thread(&q->queue);
             oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
         }
@@ -304,7 +303,6 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
     if (q->read == q->write && ticks > 0)
     {
-        cores[CURRENT_CORE].irq_level = oldlevel;
         block_thread_w_tmo(&q->queue, ticks);
         oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     }
@@ -379,7 +377,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
 
         wakeup_thread(&q->queue);
 
-        cores[CURRENT_CORE].irq_level = oldlevel;
         block_thread_no_listlock(spp);
         return thread_get_current()->retval;
     }
diff --git a/uisimulator/sdl/thread-sdl.c b/uisimulator/sdl/thread-sdl.c
index b8297072f2..d1a8e60d01 100644
--- a/uisimulator/sdl/thread-sdl.c
+++ b/uisimulator/sdl/thread-sdl.c
@@ -135,8 +135,7 @@ bool thread_sdl_init(void *param)
     running->name = "main";
     running->state = STATE_RUNNING;
     running->context.c = SDL_CreateCond();
-    cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
 
     if (running->context.c == NULL)
     {
         fprintf(stderr, "Failed to create main condition variable\n");
@@ -230,15 +229,9 @@ static void remove_from_list_l(struct thread_entry **list,
     thread->l.next->l.prev = thread->l.prev;
 }
 
-static void run_blocking_ops(void)
+static inline void run_blocking_ops(void)
 {
-    int level = cores[CURRENT_CORE].irq_level;
-
-    if (level != STAY_IRQ_LEVEL)
-    {
-        cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
-        set_irq_level(level);
-    }
+    set_irq_level(0);
 }
 
 struct thread_entry *thread_get_current(void)