Diffstat (limited to 'firmware')
 30 files changed, 3295 insertions(+), 835 deletions(-)
diff --git a/firmware/backlight.c b/firmware/backlight.c
index 7cbdeb45e8..cfe87b387e 100644
--- a/firmware/backlight.c
+++ b/firmware/backlight.c
@@ -95,7 +95,7 @@ const signed char backlight_timeout_value[19] =
 static void backlight_thread(void);
 static long backlight_stack[DEFAULT_STACK_SIZE/sizeof(long)];
 static const char backlight_thread_name[] = "backlight";
-static struct event_queue backlight_queue;
+static struct event_queue backlight_queue NOCACHEBSS_ATTR;
 
 static int backlight_timer;
 static int backlight_timeout;
@@ -465,7 +465,7 @@ static void remote_backlight_update_state(void)
 
 void backlight_thread(void)
 {
-    struct event ev;
+    struct queue_event ev;
     bool locked = false;
 
     while(1)
@@ -627,9 +627,9 @@ void backlight_init(void)
      * status if necessary. */
 
     create_thread(backlight_thread, backlight_stack,
-                  sizeof(backlight_stack), backlight_thread_name
+                  sizeof(backlight_stack), 0, backlight_thread_name
                   IF_PRIO(, PRIORITY_SYSTEM)
-                  IF_COP(, CPU, false));
+                  IF_COP(, CPU));
     tick_add_task(backlight_tick);
 }
 
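The create_thread() signature change repeated throughout this commit adds a flags word (and drops the old `fallback` boolean from IF_COP). A hedged sketch of the new flag in use, based on the CREATE_THREAD_FROZEN and thread_thaw() declarations added to thread.h further down; this is illustrative, not code from the commit:

```c
/* Illustrative only: create a thread suspended, start it later.
 * CREATE_THREAD_FROZEN and thread_thaw() are declared in thread.h below. */
void frozen_start_sketch(void)
{
    struct thread_entry *th =
        create_thread(backlight_thread, backlight_stack,
                      sizeof(backlight_stack), CREATE_THREAD_FROZEN,
                      backlight_thread_name
                      IF_PRIO(, PRIORITY_SYSTEM) /* priority-scheduling builds */
                      IF_COP(, CPU));            /* target core on multicore */

    thread_thaw(th); /* thread becomes runnable here */
}
```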
diff --git a/firmware/common/dircache.c b/firmware/common/dircache.c
index c39dd7f7b5..b92d8fe974 100644
--- a/firmware/common/dircache.c
+++ b/firmware/common/dircache.c
@@ -62,7 +62,7 @@ static unsigned long reserve_used = 0;
 static unsigned int cache_build_ticks = 0;
 static char dircache_cur_path[MAX_PATH*2];
 
-static struct event_queue dircache_queue;
+static struct event_queue dircache_queue NOCACHEBSS_ATTR;
 static long dircache_stack[(DEFAULT_STACK_SIZE + 0x900)/sizeof(long)];
 static const char dircache_thread_name[] = "dircache";
 
@@ -147,7 +147,7 @@ static struct travel_data dir_recursion[MAX_SCAN_DEPTH];
  */
 static bool check_event_queue(void)
 {
-    struct event ev;
+    struct queue_event ev;
 
     queue_wait_w_tmo(&dircache_queue, &ev, 0);
     switch (ev.id)
@@ -598,7 +598,7 @@ static int dircache_do_rebuild(void)
  */
 static void dircache_thread(void)
 {
-    struct event ev;
+    struct queue_event ev;
 
     while (1)
     {
@@ -701,8 +701,9 @@ void dircache_init(void)
 
     queue_init(&dircache_queue, true);
     create_thread(dircache_thread, dircache_stack,
-                  sizeof(dircache_stack), dircache_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
-                  IF_COP(, CPU, false));
+                  sizeof(dircache_stack), 0, dircache_thread_name
+                  IF_PRIO(, PRIORITY_BACKGROUND)
+                  IF_COP(, CPU));
 }
 
 /**
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index 2c8033a533..2119216234 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -66,7 +66,7 @@
 #define ATA_POWER_OFF_TIMEOUT 2*HZ
 #endif
 
-static struct mutex ata_mtx;
+static struct spinlock ata_spinlock NOCACHEBSS_ATTR;
 int ata_device; /* device 0 (master) or 1 (slave) */
 
 int ata_spinup_time = 0;
@@ -83,7 +83,7 @@ static bool lba48 = false; /* set for 48 bit addressing */
 #endif
 static long ata_stack[(DEFAULT_STACK_SIZE*3)/sizeof(long)];
 static const char ata_thread_name[] = "ata";
-static struct event_queue ata_queue;
+static struct event_queue ata_queue NOCACHEBSS_ATTR;
 static bool initialized = false;
 
 static long last_user_activity = -1;
@@ -234,7 +234,7 @@ int ata_read_sectors(IF_MV2(int drive,)
 #ifdef HAVE_MULTIVOLUME
     (void)drive; /* unused for now */
 #endif
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 #endif
 
     last_disk_activity = current_tick;
@@ -246,14 +246,14 @@ int ata_read_sectors(IF_MV2(int drive,)
         spinup = true;
         if (poweroff) {
             if (ata_power_on()) {
-                spinlock_unlock(&ata_mtx);
+                spinlock_unlock(&ata_spinlock);
                 ata_led(false);
                 return -1;
             }
         }
         else {
             if (perform_soft_reset()) {
-                spinlock_unlock(&ata_mtx);
+                spinlock_unlock(&ata_spinlock);
                 ata_led(false);
                 return -1;
             }
@@ -265,7 +265,7 @@ int ata_read_sectors(IF_MV2(int drive,)
     SET_REG(ATA_SELECT, ata_device);
     if (!wait_for_rdy())
     {
-        spinlock_unlock(&ata_mtx);
+        spinlock_unlock(&ata_spinlock);
         ata_led(false);
         return -2;
     }
@@ -376,7 +376,7 @@ int ata_read_sectors(IF_MV2(int drive,)
     ata_led(false);
 
 #ifndef MAX_PHYS_SECTOR_SIZE
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
 #endif
 
     return ret;
@@ -442,7 +442,7 @@ int ata_write_sectors(IF_MV2(int drive,)
 #ifdef HAVE_MULTIVOLUME
     (void)drive; /* unused for now */
 #endif
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 #endif
 
     last_disk_activity = current_tick;
@@ -454,14 +454,14 @@ int ata_write_sectors(IF_MV2(int drive,)
         spinup = true;
         if (poweroff) {
             if (ata_power_on()) {
-                spinlock_unlock(&ata_mtx);
+                spinlock_unlock(&ata_spinlock);
                 ata_led(false);
                 return -1;
             }
         }
         else {
             if (perform_soft_reset()) {
-                spinlock_unlock(&ata_mtx);
+                spinlock_unlock(&ata_spinlock);
                 ata_led(false);
                 return -1;
             }
@@ -471,7 +471,7 @@ int ata_write_sectors(IF_MV2(int drive,)
     SET_REG(ATA_SELECT, ata_device);
     if (!wait_for_rdy())
     {
-        spinlock_unlock(&ata_mtx);
+        spinlock_unlock(&ata_spinlock);
         ata_led(false);
         return -2;
     }
@@ -534,7 +534,7 @@ int ata_write_sectors(IF_MV2(int drive,)
     ata_led(false);
 
 #ifndef MAX_PHYS_SECTOR_SIZE
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
 #endif
 
     return ret;
@@ -580,7 +580,7 @@ int ata_read_sectors(IF_MV2(int drive,)
 #ifdef HAVE_MULTIVOLUME
     (void)drive; /* unused for now */
 #endif
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 
     offset = start & (phys_sector_mult - 1);
 
@@ -630,7 +630,7 @@ int ata_read_sectors(IF_MV2(int drive,)
     }
 
   error:
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
 
     return rc;
 }
@@ -646,7 +646,7 @@ int ata_write_sectors(IF_MV2(int drive,)
 #ifdef HAVE_MULTIVOLUME
     (void)drive; /* unused for now */
 #endif
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 
     offset = start & (phys_sector_mult - 1);
 
@@ -707,7 +707,7 @@ int ata_write_sectors(IF_MV2(int drive,)
     }
 
   error:
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
 
     return rc;
 }
@@ -767,13 +767,13 @@ static int ata_perform_sleep(void)
 {
     int ret = 0;
 
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 
     SET_REG(ATA_SELECT, ata_device);
 
     if(!wait_for_rdy()) {
         DEBUGF("ata_perform_sleep() - not RDY\n");
-        spinlock_unlock(&ata_mtx);
+        spinlock_unlock(&ata_spinlock);
         return -1;
     }
 
@@ -786,7 +786,7 @@ static int ata_perform_sleep(void)
     }
 
     sleeping = true;
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
     return ret;
 }
 
@@ -797,7 +797,7 @@ void ata_sleep(void)
 
 void ata_sleepnow(void)
 {
-    if (!spinup && !sleeping && !ata_mtx.locked && initialized)
+    if (!spinup && !sleeping && !ata_spinlock.locked && initialized)
     {
         call_ata_idle_notifys(false);
         ata_perform_sleep();
@@ -812,14 +812,14 @@ void ata_spin(void)
 static void ata_thread(void)
 {
     static long last_sleep = 0;
-    struct event ev;
+    struct queue_event ev;
     static long last_seen_mtx_unlock = 0;
 
     while (1) {
         while ( queue_empty( &ata_queue ) ) {
             if (!spinup && !sleeping)
             {
-                if (!ata_mtx.locked)
+                if (!ata_spinlock.locked)
                 {
                     if (!last_seen_mtx_unlock)
                         last_seen_mtx_unlock = current_tick;
@@ -844,9 +844,9 @@ static void ata_thread(void)
             if ( !spinup && sleeping && !poweroff &&
                  TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
             {
-                spinlock_lock(&ata_mtx);
+                spinlock_lock(&ata_spinlock);
                 ide_power_enable(false);
-                spinlock_unlock(&ata_mtx);
+                spinlock_unlock(&ata_spinlock);
                 poweroff = true;
             }
 #endif
@@ -858,11 +858,11 @@ static void ata_thread(void)
 #ifndef USB_NONE
             case SYS_USB_CONNECTED:
                 if (poweroff) {
-                    spinlock_lock(&ata_mtx);
+                    spinlock_lock(&ata_spinlock);
                     ata_led(true);
                     ata_power_on();
                     ata_led(false);
-                    spinlock_unlock(&ata_mtx);
+                    spinlock_unlock(&ata_spinlock);
                 }
 
                 /* Tell the USB thread that we are safe */
@@ -936,11 +936,11 @@ int ata_soft_reset(void)
 {
     int ret;
 
-    spinlock_lock(&ata_mtx);
+    spinlock_lock(&ata_spinlock);
 
     ret = perform_soft_reset();
 
-    spinlock_unlock(&ata_mtx);
+    spinlock_unlock(&ata_spinlock);
     return ret;
 }
 
@@ -1131,7 +1131,7 @@ int ata_init(void)
     bool coldstart = ata_is_coldstart();
     /* must be called before ata_device_init() */
 
-    spinlock_init(&ata_mtx);
+    spinlock_init(&ata_spinlock IF_COP(, SPINLOCK_TASK_SWITCH));
 
     ata_led(false);
     ata_device_init();
@@ -1205,9 +1205,9 @@ int ata_init(void)
 
     last_disk_activity = current_tick;
     create_thread(ata_thread, ata_stack,
-                  sizeof(ata_stack), ata_thread_name
+                  sizeof(ata_stack), 0, ata_thread_name
                   IF_PRIO(, PRIORITY_SYSTEM)
-                  IF_COP(, CPU, false));
+                  IF_COP(, CPU));
     initialized = true;
 
 }
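ata.c's conversion from the old mutex-backed spinlock wrapper to the new `struct spinlock` follows one pattern throughout. A minimal sketch of the discipline, using only the kernel.h API added later in this diff (not verbatim driver code):

```c
/* Sketch of the ata_spinlock pattern above, under the new kernel.h API */
static struct spinlock example_spinlock NOCACHEBSS_ATTR;

void example_init(void)
{
    /* the flags argument exists only on multicore (IF_COP) builds;
       SPINLOCK_TASK_SWITCH lets other threads run while waiting */
    spinlock_init(&example_spinlock IF_COP(, SPINLOCK_TASK_SWITCH));
}

int example_op(void)
{
    spinlock_lock(&example_spinlock);   /* serialize CPU and COP access */
    /* ... program device registers ... */
    spinlock_unlock(&example_spinlock);
    return 0;
}
```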
diff --git a/firmware/drivers/ata_mmc.c b/firmware/drivers/ata_mmc.c
index 66e60ead1d..604d1dde34 100644
--- a/firmware/drivers/ata_mmc.c
+++ b/firmware/drivers/ata_mmc.c
@@ -959,7 +959,7 @@ void ata_spin(void)
 
 static void mmc_thread(void)
 {
-    struct event ev;
+    struct queue_event ev;
     bool idle_notified = false;
 
     while (1) {
@@ -1153,8 +1153,9 @@ int ata_init(void)
 
         queue_init(&mmc_queue, true);
         create_thread(mmc_thread, mmc_stack,
-                      sizeof(mmc_stack), mmc_thread_name IF_PRIO(, PRIORITY_SYSTEM)
-                      IF_COP(, CPU, false));
+                      sizeof(mmc_stack), 0, mmc_thread_name
+                      IF_PRIO(, PRIORITY_SYSTEM)
+                      IF_COP(, CPU));
         tick_add_task(mmc_tick);
         initialized = true;
     }
diff --git a/firmware/drivers/button.c b/firmware/drivers/button.c
index 851b5b9b20..25c590323a 100644
--- a/firmware/drivers/button.c
+++ b/firmware/drivers/button.c
@@ -46,7 +46,7 @@
 #define MAX_EVENT_AGE HZ
 #endif
 
-struct event_queue button_queue;
+struct event_queue button_queue NOCACHEBSS_ATTR;
 
 static long lastbtn;   /* Last valid button status */
 static long last_read; /* Last button status, for debouncing/filtering */
@@ -300,7 +300,7 @@ int button_queue_count( void )
 
 long button_get(bool block)
 {
-    struct event ev;
+    struct queue_event ev;
     int pending_count = queue_count(&button_queue);
 
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
@@ -330,7 +330,7 @@ long button_get(bool block)
 
 long button_get_w_tmo(int ticks)
 {
-    struct event ev;
+    struct queue_event ev;
 
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
     /* Be sure to keep boosted state. */
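The `struct event` to `struct queue_event` rename is purely mechanical; receive loops keep their shape. A minimal sketch against the renamed API (SYS_TIMEOUT is the kernel's usual no-message id, assumed here since its definition is not shown in this excerpt):

```c
/* Illustrative receive loop with the renamed queue_event struct */
static void event_loop_sketch(struct event_queue *q)
{
    struct queue_event ev;

    while (1)
    {
        queue_wait_w_tmo(q, &ev, HZ);  /* wake at least once per second */
        if (ev.id == SYS_TIMEOUT)      /* assumed timeout id */
            continue;
        /* ... dispatch on ev.id / ev.data ... */
    }
}
```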
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c
index a4fa7aa933..cfd4767032 100644
--- a/firmware/drivers/fat.c
+++ b/firmware/drivers/fat.c
@@ -197,7 +197,7 @@ struct fat_cache_entry
 
 static char fat_cache_sectors[FAT_CACHE_SIZE][SECTOR_SIZE];
 static struct fat_cache_entry fat_cache[FAT_CACHE_SIZE];
-static struct mutex cache_mutex;
+static struct mutex cache_mutex NOCACHEBSS_ATTR;
 
 static long cluster2sec(IF_MV2(struct bpb* fat_bpb,) long cluster)
 {
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 46d4336e70..46c4d3dfd2 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -282,9 +282,13 @@
 #define HAVE_EXTENDED_MESSAGING_AND_NAME
 #endif
 
-#if (CONFIG_CODEC == SWCODEC) && !defined(SIMULATOR) && !defined(BOOTLOADER)
+#if (CONFIG_CODEC == SWCODEC) && !defined(BOOTLOADER)
+#ifndef SIMULATOR
 #define HAVE_PRIORITY_SCHEDULING
 #define HAVE_SCHEDULER_BOOSTCTRL
+#endif /* SIMULATOR */
+#define HAVE_SEMAPHORE_OBJECTS
+#define HAVE_EVENT_OBJECTS
 #endif
 
 /* define for all cpus from SH family */
@@ -363,31 +367,70 @@
 #define IRAM_LCDFRAMEBUFFER
 #endif
 
+/* Change this if you want to build a single-core firmware for a multicore
+ * target for debugging */
+#if defined(BOOTLOADER)
+#define FORCE_SINGLE_CORE
+#endif
+
+/* Core locking types - specifies type of atomic operation */
+#define CORELOCK_NONE 0
+#define SW_CORELOCK   1 /* Mutual exclusion provided by a software algorithm
+                           and not a special semaphore instruction */
+#define CORELOCK_SWAP 2 /* A swap (exchange) instruction */
+
 /* Dual core support - not yet working on the 1G/2G and 3G iPod */
 #if defined(CPU_PP)
 #define IDLE_STACK_SIZE  0x80
 #define IDLE_STACK_WORDS 0x20
 
-#if !defined(BOOTLOADER) && CONFIG_CPU != PP5002
+#if !defined(FORCE_SINGLE_CORE) && CONFIG_CPU != PP5002
+
 #define NUM_CORES 2
 #define CURRENT_CORE current_core()
-/* Hopefully at some point we will learn how to mark areas of main memory as
- * not to be cached. Until then, use IRAM for variables shared across cores */
+/* Use IRAM for variables shared across cores - large memory buffers should
+ * use UNCACHED_ADDR(a) and be appropriately aligned and padded */
 #define NOCACHEBSS_ATTR IBSS_ATTR
 #define NOCACHEDATA_ATTR IDATA_ATTR
 
 #define IF_COP(...) __VA_ARGS__
+#define IF_COP_VOID(...) __VA_ARGS__
+#define IF_COP_CORE(core) core
+
+#if CONFIG_CPU == PP5020
+#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
+#else
+#define CONFIG_CORELOCK CORELOCK_SWAP
+#endif
+
 #endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
+
 #endif /* CPU_PP */
 
+#ifndef CONFIG_CORELOCK
+#define CONFIG_CORELOCK CORELOCK_NONE
+#endif
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+#define IF_SWCL(...) __VA_ARGS__
+#define IFN_SWCL(...)
+#else
+#define IF_SWCL(...)
+#define IFN_SWCL(...) __VA_ARGS__
+#endif /* CONFIG_CORELOCK == */
+
 #ifndef NUM_CORES
 /* Default to single core */
 #define NUM_CORES 1
 #define CURRENT_CORE CPU
 #define NOCACHEBSS_ATTR
 #define NOCACHEDATA_ATTR
+#define CONFIG_CORELOCK CORELOCK_NONE
 
 #define IF_COP(...)
+#define IF_COP_VOID(...) void
+#define IF_COP_CORE(core) CURRENT_CORE
+
 #endif /* NUM_CORES */
 
 #endif /* __CONFIG_H__ */
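The corelock plumbing above is what the rest of the commit keys on: PP5020 gets the software algorithm because its SWP(B) instruction is broken, other dual-core PP targets use the swap instruction, and everything else compiles core locking away. A hedged sketch of the debugging knob the new comment describes:

```c
/* Sketch: building a multicore target as single-core for debugging.
 * Defining this (as the BOOTLOADER branch above does) keeps NUM_CORES == 1,
 * selects CONFIG_CORELOCK == CORELOCK_NONE, and makes every IF_COP(...)
 * argument vanish from the function signatures in this commit. */
#define FORCE_SINGLE_CORE
```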
diff --git a/firmware/export/i2c-pp.h b/firmware/export/i2c-pp.h
index 3048acbaba..908db22554 100644
--- a/firmware/export/i2c-pp.h
+++ b/firmware/export/i2c-pp.h
@@ -45,6 +45,10 @@
 
 /* TODO: Fully implement i2c driver */
 
+/* To be used by drivers that need to do multiple i2c operations
+   atomically */
+extern struct spinlock i2c_spin;
+
 void i2c_init(void);
 int i2c_readbyte(unsigned int dev_addr, int addr);
 int pp_i2c_send(unsigned int addr, int data0, int data1);
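The newly exported i2c_spin lets a driver compose several bus transactions into one atomic unit. A hedged sketch using the prototypes declared here (the device and register values are placeholders, not from the commit):

```c
#include "i2c-pp.h"

/* Illustrative read-modify-write kept atomic across both transfers */
int rmw_sketch(unsigned int dev_addr, int reg, int set_bits)
{
    int val;

    spinlock_lock(&i2c_spin);            /* hold the bus for both ops */
    val = i2c_readbyte(dev_addr, reg);
    pp_i2c_send(dev_addr, reg, val | set_bits);
    spinlock_unlock(&i2c_spin);

    return val;
}
```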
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 3d70e49a4c..a72e004b33 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -23,6 +23,8 @@
 #include <inttypes.h>
 #include "config.h"
 
+#include "thread.h"
+
 /* wrap-safe macros for tick comparison */
 #define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
 #define TIME_BEFORE(a,b) TIME_AFTER(b,a)
@@ -31,6 +33,7 @@
 
 #define MAX_NUM_TICK_TASKS 8
 
+#define MAX_NUM_QUEUES 32
 #define QUEUE_LENGTH 16 /* MUST be a power of 2 */
 #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
 
@@ -72,7 +75,7 @@
 #define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
 #define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
 
-struct event
+struct queue_event
 {
     long id;
     intptr_t data;
@@ -91,20 +94,66 @@ struct queue_sender_list
 
 struct event_queue
 {
-    struct event events[QUEUE_LENGTH];
-    struct thread_entry *thread;
-    unsigned int read;
-    unsigned int write;
+    struct thread_queue queue;         /* Waiter list */
+    struct queue_event events[QUEUE_LENGTH]; /* list of events */
+    unsigned int read;                 /* head of queue */
+    unsigned int write;                /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    struct queue_sender_list *send;
+    struct queue_sender_list *send;    /* list of threads waiting for
+                                          reply to an event */
+#endif
+#if NUM_CORES > 1
+    struct corelock cl;                /* inter-core sync */
 #endif
 };
 
 struct mutex
 {
-    uint32_t locked;
-    struct thread_entry *thread;
+    struct thread_entry *queue;        /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                /* inter-core sync */
+#endif
+    struct thread_entry *thread;       /* thread that owns lock */
+    int count;                         /* lock owner recursion count */
+    unsigned char locked;              /* locked semaphore */
+};
+
+struct spinlock
+{
+#if NUM_CORES > 1
+    struct corelock cl;                /* inter-core sync */
+#endif
+    struct thread_entry *thread;       /* lock owner */
+    int count;                         /* lock owner recursion count */
+    unsigned char locked;              /* is locked if nonzero */
+#if NUM_CORES > 1
+    unsigned char task_switch;         /* can task switch? */
+#endif
+};
+
+#ifdef HAVE_SEMAPHORE_OBJECTS
+struct semaphore
+{
+    struct thread_entry *queue;        /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                /* inter-core sync */
+#endif
+    int count;                         /* # of waits remaining before unsignaled */
+    int max;                           /* maximum # of waits to remain signaled */
+};
+#endif
+
+#ifdef HAVE_EVENT_OBJECTS
+struct event
+{
+    struct thread_entry *queues[2];    /* waiters for each state */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                /* inter-core sync */
+#endif
+    unsigned char automatic;           /* event performs auto-reset */
+    unsigned char state;               /* state: 1 = signaled */
 };
+#endif
 
 /* global tick variable */
 #if defined(CPU_PP) && defined(BOOTLOADER)
@@ -127,6 +176,7 @@ extern void yield(void);
 extern void sleep(int ticks);
 int tick_add_task(void (*f)(void));
 int tick_remove_task(void (*f)(void));
+extern void tick_start(unsigned int interval_in_ms);
 
 struct timeout;
 
@@ -150,10 +200,17 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                       int ticks, intptr_t data);
 void timeout_cancel(struct timeout *tmo);
 
+#define STATE_NONSIGNALED 0
+#define STATE_SIGNALED    1
+
+#define WAIT_TIMEDOUT     (-1)
+#define WAIT_SUCCEEDED    1
+
 extern void queue_init(struct event_queue *q, bool register_queue);
 extern void queue_delete(struct event_queue *q);
-extern void queue_wait(struct event_queue *q, struct event *ev);
-extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
+extern void queue_wait(struct event_queue *q, struct queue_event *ev);
+extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
+                             int ticks);
 extern void queue_post(struct event_queue *q, long id, intptr_t data);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
 extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
@@ -168,14 +225,26 @@ extern int queue_count(const struct event_queue *q);
 extern int queue_broadcast(long id, intptr_t data);
 
 extern void mutex_init(struct mutex *m);
-static inline void spinlock_init(struct mutex *m)
-{ mutex_init(m); } /* Same thing for now */
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
-extern void spinlock_lock(struct mutex *m);
-extern void spinlock_unlock(struct mutex *m);
-extern void tick_start(unsigned int interval_in_ms);
-
+#define SPINLOCK_TASK_SWITCH    0x10
+#define SPINLOCK_NO_TASK_SWITCH 0x00
+extern void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags));
+extern void spinlock_lock(struct spinlock *l);
+extern void spinlock_unlock(struct spinlock *l);
+extern int spinlock_lock_w_tmo(struct spinlock *l, int ticks);
+#ifdef HAVE_SEMAPHORE_OBJECTS
+extern void semaphore_init(struct semaphore *s, int max, int start);
+extern void semaphore_wait(struct semaphore *s);
+extern void semaphore_release(struct semaphore *s);
+#endif /* HAVE_SEMAPHORE_OBJECTS */
+#ifdef HAVE_EVENT_OBJECTS
+#define EVENT_AUTOMATIC 0x10
+#define EVENT_MANUAL    0x00
+extern void event_init(struct event *e, unsigned int flags);
+extern void event_wait(struct event *e, unsigned int for_state);
+extern void event_set_state(struct event *e, unsigned int state);
+#endif /* HAVE_EVENT_OBJECTS */
 #define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
 
-#endif
+#endif /* _KERNEL_H_ */
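kernel.h now grows counting semaphores and signal/reset events on SWCODEC targets. A usage sketch built only from the prototypes above; the names and the initial-state assumption are illustrative, not from the commit:

```c
/* Binary-semaphore sketch: max = 1, start = 0 begins unsignaled */
static struct semaphore data_sem;

void sem_init_sketch(void) { semaphore_init(&data_sem, 1, 0); }
void producer_sketch(void) { semaphore_release(&data_sem); } /* signal one waiter */
void consumer_sketch(void) { semaphore_wait(&data_sem); }    /* block until signaled */

/* Manual-reset event sketch: the event stays signaled until reset,
 * so every waiter is released. Initial state assumed nonsignaled. */
static struct event buf_event;

void event_sketch(void)
{
    event_init(&buf_event, EVENT_MANUAL);
    event_set_state(&buf_event, STATE_SIGNALED);    /* wake all waiters */
    event_wait(&buf_event, STATE_SIGNALED);         /* returns immediately now */
    event_set_state(&buf_event, STATE_NONSIGNALED); /* reset */
}
```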
diff --git a/firmware/export/pp5002.h b/firmware/export/pp5002.h
index b2e02f6174..021c248690 100644
--- a/firmware/export/pp5002.h
+++ b/firmware/export/pp5002.h
@@ -139,6 +139,8 @@
 #define CPU_CTL (*(volatile unsigned char *)(0xcf004054))
 #define COP_CTL (*(volatile unsigned char *)(0xcf004058))
 
+#define PROC_CTL(core) ((&CPU_CTL)[(core)*4])
+
 #define PROC_SLEEP 0xca
 #define PROC_WAKE  0xce
 
diff --git a/firmware/export/pp5020.h b/firmware/export/pp5020.h
index 5654a7de63..b591bce695 100644
--- a/firmware/export/pp5020.h
+++ b/firmware/export/pp5020.h
@@ -34,11 +34,15 @@
 /* Each processor has two mailboxes it can write to and two which
    it can read from. We define the first to be for sending messages
    and the second for replying to messages */
 #define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
 #define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
 #define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
 #define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
 #define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))
+
+/* Simple convenient array-like access */
+#define PROC_MESSAGE(core) ((&CPU_MESSAGE)[core])
+#define PROC_REPLY(core) ((&CPU_REPLY)[core])
 
 /* Interrupts */
 #define CPU_INT_STAT (*(volatile unsigned long*)(0x60004000))
@@ -142,6 +146,7 @@
 /* Processors Control */
 #define CPU_CTL (*(volatile unsigned long *)(0x60007000))
 #define COP_CTL (*(volatile unsigned long *)(0x60007004))
+#define PROC_CTL(core) ((&CPU_CTL)[core])
 
 #define PROC_SLEEP 0x80000000
 #define PROC_WAIT 0x40000000
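PROC_CTL()/PROC_MESSAGE()/PROC_REPLY() work because the per-core registers sit at consecutive addresses, so a core id indexes them like an array (pp5002.h needs a *4 stride since its control registers are byte-wide, 4 bytes apart). A hedged sketch, assuming the conventional core numbering where CURRENT_CORE yields this core's id; a real sleep sequence would involve more than this single store:

```c
/* Illustrative only: request sleep for the calling core.
 * PROC_CTL(core) expands to (&CPU_CTL)[core]. */
static inline void core_sleep_sketch(void)
{
    PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
}
```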
diff --git a/firmware/export/system.h b/firmware/export/system.h
index 24e1a2d861..dc10c4545f 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -45,6 +45,10 @@ bool detect_original_firmware(void);
 #endif
 
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
+#if NUM_CORES > 1
+extern struct spinlock boostctrl_spin;
+#endif
+void cpu_boost_init(void);
 #define FREQ cpu_frequency
 void set_cpu_frequency(long frequency);
 #ifdef CPU_BOOST_LOGGING
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 7c683ddde5..20cde1a8e3 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -21,6 +21,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include <stddef.h>
 #include <stdbool.h>
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
@@ -31,13 +32,15 @@
  * can change it own priority to REALTIME to override user interface and
  * prevent playback skipping.
  */
+#define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
+#define LOWEST_PRIORITY        100   /* The lowest possible thread priority */
 #define PRIORITY_REALTIME        1
 #define PRIORITY_USER_INTERFACE  4   /* The main thread */
 #define PRIORITY_RECORDING       4   /* Recording thread */
 #define PRIORITY_PLAYBACK        4   /* or REALTIME when needed */
 #define PRIORITY_BUFFERING       4   /* Codec buffering thread */
 #define PRIORITY_SYSTEM          6   /* All other firmware threads */
 #define PRIORITY_BACKGROUND      8   /* Normal application threads */
 
 #if CONFIG_CODEC == SWCODEC
 #define MAXTHREADS 16
@@ -47,6 +50,46 @@
 
 #define DEFAULT_STACK_SIZE 0x400 /* Bytes */
 
+/**
+ * "Busy" values that can be swapped into a variable to indicate
+ * that the variable or object pointed to is in use by another processor
+ * core. When accessed, the busy value is swapped-in while the current
+ * value is atomically returned. If the swap returns the busy value,
+ * the processor should retry the operation until some other value is
+ * returned. When modification is finished, the new value should be
+ * written which unlocks it and updates it atomically.
+ *
+ * Procedure:
+ * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
+ *
+ * Modify/examine object at mem location or variable. Create "new_value"
+ * as suitable.
+ *
+ * variable = new_value or curr_value;
+ *
+ * To check a value for busy and perform an operation if not:
+ * curr_value = swap(&variable, BUSY_VALUE);
+ *
+ * if (curr_value != BUSY_VALUE)
+ * {
+ *     Modify/examine object at mem location or variable. Create "new_value"
+ *     as suitable.
+ *     variable = new_value or curr_value;
+ * }
+ * else
+ * {
+ *     Do nothing - already busy
+ * }
+ *
+ * Only ever restore when an actual value is returned or else it could leave
+ * the variable locked permanently if another processor unlocked in the
+ * meantime. The next access attempt would deadlock for all processors since
+ * an abandoned busy status would be left behind.
+ */
+#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
+#define STATE_BUSYu8   UINT8_MAX
+#define STATE_BUSYi    INT_MIN
+
 #ifndef SIMULATOR
 /* Need to keep structures inside the header file because debug_menu
  * needs them. */
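The STATE_BUSY* markers implement the retry protocol the comment block above spells out. As a compilable sketch of that procedure applied to a one-byte state, using the xchg8() primitive defined further down in this header (which takes an extra corelock argument under SOFTWARE_CORELOCK); this is illustrative, not the commit's scheduler code:

```c
/* Hedged sketch of the busy-swap protocol for a one-byte state variable */
static void busy_swap_sketch(volatile unsigned char *state)
{
    unsigned char s;

    /* lock: keep swapping the busy marker in until a real value comes back */
    while ((s = xchg8(state, STATE_BUSYu8)) == STATE_BUSYu8)
        ;

    /* ... examine/modify the object guarded by *state ... */

    *state = s;  /* restore a real value - this is the unlock */
}
```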
@@ -58,7 +101,7 @@ struct regs
     unsigned int a[5]; /* 28-44 - a2-a6 */
     void *sp;          /* 48 - Stack pointer (a7) */
     void *start;       /* 52 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #elif CONFIG_CPU == SH7034
 struct regs
 {
@@ -66,7 +109,7 @@ struct regs
     void *sp;    /* 28 - Stack pointer (r15) */
     void *pr;    /* 32 - Procedure register */
     void *start; /* 36 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #elif defined(CPU_ARM)
 struct regs
 {
@@ -74,7 +117,7 @@ struct regs
     void *sp;        /* 32 - Stack pointer (r13) */
    unsigned int lr;  /* 36 - r14 (lr) */
     void *start;     /* 40 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
 #endif /* CONFIG_CPU */
 #else
 struct regs
@@ -85,58 +128,206 @@ struct regs
 };
 #endif /* !SIMULATOR */
 
-#define STATE_RUNNING       0x00000000
-#define STATE_BLOCKED       0x20000000
-#define STATE_SLEEPING      0x40000000
-#define STATE_BLOCKED_W_TMO 0x60000000
-
-#define THREAD_STATE_MASK   0x60000000
-#define STATE_ARG_MASK      0x1FFFFFFF
-
-#define GET_STATE_ARG(state)     (state & STATE_ARG_MASK)
-#define GET_STATE(state)         (state & THREAD_STATE_MASK)
-#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
-#define CLEAR_STATE_ARG(var)     (var &= ~STATE_ARG_MASK)
-
-#define STATE_BOOSTED         0x80000000
-#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
-#define SET_BOOST_STATE(var)  (var |= STATE_BOOSTED)
-
-struct thread_entry {
-    struct regs context;
-    const char *name;
-    void *stack;
-    unsigned long statearg;
-    unsigned short stack_size;
-# if NUM_CORES > 1
-    unsigned char core; /* To which core threads belongs to. */
-# endif
-#ifdef HAVE_PRIORITY_SCHEDULING
-    unsigned char priority;
-    unsigned char priority_x;
-    long last_run;
+/* NOTE: The use of the word "queue" may also refer to a linked list of
+   threads being maintainted that are normally dealt with in FIFO order
+   and not nescessarily kernel event_queue */
+enum
+{
+    /* States without a timeout must be first */
+    STATE_KILLED = 0,    /* Thread is killed (default) */
+    STATE_RUNNING,       /* Thread is currently running */
+    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
+    /* These states involve adding the thread to the tmo list */
+    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
+    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
+    /* Miscellaneous states */
+    STATE_FROZEN,        /* Thread is suspended and will not run until
+                            thread_thaw is called with its ID */
+    THREAD_NUM_STATES,
+    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
+#if NUM_CORES > 1
+    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
 #endif
-    struct thread_entry *next, *prev;
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    intptr_t retval;
+};
+
+#if NUM_CORES > 1
+#define THREAD_DESTRUCT ((const char *)0x84905617)
 #endif
+
+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+    struct thread_entry *prev; /* Previous thread in a list */
+    struct thread_entry *next; /* Next thread in a list */
 };
 
-struct core_entry {
-    struct thread_entry *running;
-    struct thread_entry *sleeping;
-    struct thread_entry *waking;
-    struct thread_entry **wakeup_list;
+/* Small objects for core-wise mutual exclusion */
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+    volatile unsigned char myl[NUM_CORES];
+    volatile unsigned char turn;
+} __attribute__((packed));
+
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+/* Use native atomic swap/exchange instruction */
+struct corelock
+{
+    unsigned char locked;
+} __attribute__((packed));
+
+#define corelock_init(cl) \
+    ({ (cl)->locked = 0; })
+#define corelock_lock(cl) \
+    ({ while (test_and_set(&(cl)->locked, 1)); })
+#define corelock_try_lock(cl) \
+    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
+#define corelock_unlock(cl) \
+    ({ (cl)->locked = 0; })
+#else
+/* No atomic corelock op needed or just none defined */
+#define corelock_init(cl)
+#define corelock_lock(cl)
+#define corelock_try_lock(cl)
+#define corelock_unlock(cl)
+#endif /* core locking selection */
+
+struct thread_queue
+{
+    struct thread_entry *queue; /* list of threads waiting -
+                                   _must_ be first member */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;         /* lock for atomic list operations */
+#endif
+};
+
+/* Information kept in each thread slot
+ * members are arranged according to size - largest first - in order
+ * to ensure both alignment and packing at the same time.
+ */
+struct thread_entry
+{
+    struct regs context;        /* Register context at switch -
+                                   _must_ be first member */
+    void *stack;                /* Pointer to top of stack */
+    const char *name;           /* Thread name */
+    long tmo_tick;              /* Tick when thread should be woken from
+                                   timeout */
+    struct thread_list l;       /* Links for blocked/waking/running -
+                                   circular linkage in both directions */
+    struct thread_list tmo;     /* Links for timeout list -
+                                   Self-pointer-terminated in reverse direction,
+                                   NULL-terminated in forward direction */
+    struct thread_queue *bqp;   /* Pointer to list variable in kernel
+                                   object where thread is blocked - used
+                                   for implicit unblock and explicit wake */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
+                                    object where thread is blocked - non-locked
+                                    operations will be used */
+#endif
+    struct thread_entry *queue; /* List of threads waiting for thread to be
+                                   removed */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+    intptr_t retval;            /* Return value from a blocked operation */
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+    long last_run;              /* Last tick when started */
+#endif
+    unsigned short stack_size;  /* Size of stack in bytes */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    long highest_priority;
+    unsigned char priority;     /* Current priority */
+    unsigned char priority_x;   /* Inherited priority - right now just a
+                                   runtime guarantee flag */
 #endif
+    unsigned char state;        /* Thread slot state (STATE_*) */
 #if NUM_CORES > 1
-    volatile bool lock_issued;
-    volatile bool kernel_running;
+    unsigned char core;         /* The core to which thread belongs */
+#endif
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+    unsigned char boosted;      /* CPU frequency boost flag */
+#endif
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;         /* Corelock to lock thread slot */
+#endif
+};
+
+#if NUM_CORES > 1
+/* Operations to be performed just before stopping a thread and starting
+   a new one if specified before calling switch_thread */
+#define TBOP_UNLOCK_LIST     0x01 /* Set a pointer variable address var_ptrp */
+#if CONFIG_CORELOCK == CORELOCK_SWAP
+#define TBOP_SET_VARi        0x02 /* Set an int at address var_ip */
+#define TBOP_SET_VARu8       0x03 /* Set an unsigned char at address var_u8p */
+#define TBOP_VAR_TYPE_MASK   0x03 /* Mask for variable type*/
+#endif /* CONFIG_CORELOCK */
+#define TBOP_UNLOCK_CORELOCK 0x04
+#define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
+#define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
+#define TBOP_IRQ_LEVEL       0x20 /* Set a new irq level */
+#define TBOP_SWITCH_CORE     0x40 /* Call the core switch preparation routine */
+
+struct thread_blk_ops
+{
+    int irq_level;                   /* new IRQ level to set */
+#if CONFIG_CORELOCK != SW_CORELOCK
+    union
+    {
+        int var_iv;                  /* int variable value to set */
+        uint8_t var_u8v;             /* unsigned char valur to set */
+        struct thread_entry *list_v; /* list pointer queue value to set */
+    };
+#endif
+    union
+    {
+#if CONFIG_CORELOCK != SW_CORELOCK
+        int *var_ip;                 /* pointer to int variable */
+        uint8_t *var_u8p;            /* pointer to unsigned char varuable */
+#endif
+        struct thread_queue *list_p; /* pointer to list variable */
+    };
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock *cl_p;           /* corelock to unlock */
+    struct thread_entry *thread;     /* thread to unlock */
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+    unsigned char state;             /* new thread state (performs unlock) */
+#endif /* SOFTWARE_CORELOCK */
+    unsigned char flags;             /* TBOP_* flags */
+};
+#endif /* NUM_CORES > 1 */
+
+/* Information kept for each core
+ * Member are arranged for the same reason as in thread_entry
+ */
+struct core_entry
+{
+    /* "Active" lists - core is constantly active on these and are never
+       locked and interrupts do not access them */
+    struct thread_entry *running;  /* threads that are running */
+    struct thread_entry *timeout;  /* threads that are on a timeout before
+                                      running again */
+    /* "Shared" lists - cores interact in a synchronized manner - access
+       is locked between cores and interrupts */
+    struct thread_queue waking;    /* intermediate locked list that
+                                      hold threads other core should wake up
+                                      on next task switch */
+    long next_tmo_check;           /* soonest time to check tmo threads */
+#if NUM_CORES > 1
+    struct thread_blk_ops blk_ops; /* operations to perform when
+                                      blocking a thread */
+#else
+#define STAY_IRQ_LEVEL (-1)
+    int irq_level;                 /* sets the irq level to irq_level */
+#endif /* NUM_CORES */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    unsigned char highest_priority;
 #endif
-    long last_tick;
-    int switch_to_irq_level;
-    #define STAY_IRQ_LEVEL -1
 };
 
 #ifdef HAVE_PRIORITY_SCHEDULING
145 | #define IF_PRIO(...) | 336 | #define IF_PRIO(...) |
146 | #endif | 337 | #endif |
147 | 338 | ||
148 | /* PortalPlayer chips have 2 cores, therefore need atomic mutexes | ||
149 | * Just use it for ARM, Coldfire and whatever else well...why not? | ||
150 | */ | ||
151 | |||
152 | /* Macros generate better code than an inline function is this case */ | 339 | /* Macros generate better code than an inline function is this case */ |
153 | #if (defined (CPU_PP) || defined (CPU_ARM)) && CONFIG_CPU != PP5020 | 340 | #if (defined (CPU_PP) || defined (CPU_ARM)) |
154 | #define test_and_set(x_, v_) \ | 341 | /* atomic */ |
155 | ({ \ | 342 | #ifdef SOFTWARE_CORELOCK |
156 | uint32_t old; \ | 343 | #define test_and_set(a, v, cl) \ |
157 | asm volatile ( \ | 344 | xchg8((a), (v), (cl)) |
158 | "swpb %[old], %[v], [%[x]] \r\n" \ | 345 | /* atomic */ |
159 | : [old]"=r"(old) \ | 346 | #define xchg8(a, v, cl) \ |
160 | : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \ | 347 | ({ uint32_t o; \ |
161 | ); \ | 348 | corelock_lock(cl); \ |
162 | old; \ | 349 | o = *(uint8_t *)(a); \ |
163 | }) | 350 | *(uint8_t *)(a) = (v); \ |
351 | corelock_unlock(cl); \ | ||
352 | o; }) | ||
353 | #define xchg32(a, v, cl) \ | ||
354 | ({ uint32_t o; \ | ||
355 | corelock_lock(cl); \ | ||
356 | o = *(uint32_t *)(a); \ | ||
357 | *(uint32_t *)(a) = (v); \ | ||
358 | corelock_unlock(cl); \ | ||
359 | o; }) | ||
360 | #define xchgptr(a, v, cl) \ | ||
361 | ({ typeof (*(a)) o; \ | ||
362 | corelock_lock(cl); \ | ||
363 | o = *(a); \ | ||
364 | *(a) = (v); \ | ||
365 | corelock_unlock(cl); \ | ||
366 | o; }) | ||
367 | #else | ||
368 | /* atomic */ | ||
369 | #define test_and_set(a, v, ...) \ | ||
370 | xchg8((a), (v)) | ||
371 | #define xchg8(a, v, ...) \ | ||
372 | ({ uint32_t o; \ | ||
373 | asm volatile( \ | ||
374 | "swpb %0, %1, [%2]" \ | ||
375 | : "=r"(o) \ | ||
376 | : "r"(v), \ | ||
377 | "r"((uint8_t*)(a))); \ | ||
378 | o; }) | ||
379 | /* atomic */ | ||
380 | #define xchg32(a, v, ...) \ | ||
381 | ({ uint32_t o; \ | ||
382 | asm volatile( \ | ||
383 | "swp %0, %1, [%2]" \ | ||
384 | : "=r"(o) \ | ||
385 | : "r"((uint32_t)(v)), \ | ||
386 | "r"((uint32_t*)(a))); \ | ||
387 | o; }) | ||
388 | /* atomic */ | ||
389 | #define xchgptr(a, v, ...) \ | ||
390 | ({ typeof (*(a)) o; \ | ||
391 | asm volatile( \ | ||
392 | "swp %0, %1, [%2]" \ | ||
393 | : "=r"(o) \ | ||
394 | : "r"(v), "r"(a)); \ | ||
395 | o; }) | ||
396 | #endif /* SOFTWARE_CORELOCK */ | ||
164 | #elif defined (CPU_COLDFIRE) | 397 | #elif defined (CPU_COLDFIRE) |
165 | #define test_and_set(x_, v_) \ | 398 | /* atomic */ |
166 | ({ \ | 399 | /* one branch will be optimized away if v is a constant expression */ |
167 | uint8_t old; \ | 400 | #define test_and_set(a, v, ...) \ |
168 | asm volatile ( \ | 401 | ({ uint32_t o = 0; \ |
169 | "bset.l %[v], (%[x]) \r\n" \ | 402 | if (v) { \ |
170 | "sne.b %[old] \r\n" \ | 403 | asm volatile ( \ |
171 | : [old]"=d,d"(old) \ | 404 | "bset.b #0, (%0)" \ |
172 | : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \ | 405 | : : "a"((uint8_t*)(a)) \ |
173 | ); \ | 406 | : "cc"); \ |
174 | old; \ | 407 | } else { \ |
175 | }) | 408 | asm volatile ( \ |
409 | "bclr.b #0, (%0)" \ | ||
410 | : : "a"((uint8_t*)(a)) \ | ||
411 | : "cc"); \ | ||
412 | } \ | ||
413 | asm volatile ("sne.b %0" \ | ||
414 | : "+d"(o)); \ | ||
415 | o; }) | ||
176 | #elif CONFIG_CPU == SH7034 | 416 | #elif CONFIG_CPU == SH7034 |
177 | #define test_and_set(x_, v_) \ | 417 | /* atomic */ |
178 | ({ \ | 418 | #define test_and_set(a, v, ...) \ |
179 | uint32_t old; \ | 419 | ({ uint32_t o; \ |
180 | asm volatile ( \ | 420 | asm volatile ( \ |
181 | "tas.b @%[x] \r\n" \ | 421 | "tas.b @%2 \n" \ |
182 | "mov #-1, %[old] \r\n" \ | 422 | "mov #-1, %0 \n" \ |
183 | "negc %[old], %[old] \r\n" \ | 423 | "negc %0, %0 \n" \ |
184 | : [old]"=r"(old) \ | 424 | : "=r"(o) \ |
185 | : [v]"M"((uint32_t)v_), /* Value of v_ must be 1 */ \ | 425 | "M"((uint32_t)(v)), /* Value of v must be 1 */ \ |
186 | [x]"r"((uint8_t *)x_) \ | 426 | "r"((uint8_t *)(a))); \ |
187 | ); \ | 427 | o; }) |
188 | old; \ | 428 | #endif /* CONFIG_CPU == */ |
189 | }) | 429 | |
190 | #else | 430 | /* defaults for no asm version */ |
191 | /* default for no asm version */ | 431 | #ifndef test_and_set |
192 | #define test_and_set(x_, v_) \ | 432 | /* not atomic */ |
193 | ({ \ | 433 | #define test_and_set(a, v, ...) \ |
194 | uint32_t old = *(uint32_t *)x_; \ | 434 | ({ uint32_t o = *(uint8_t *)(a); \ |
195 | *(uint32_t *)x_ = v_; \ | 435 | *(uint8_t *)(a) = (v); \ |
196 | old; \ | 436 | o; }) |
197 | }) | 437 | #endif /* test_and_set */ |
198 | #endif | 438 | #ifndef xchg8 |
439 | /* not atomic */ | ||
440 | #define xchg8(a, v, ...) \ | ||
441 | ({ uint32_t o = *(uint8_t *)(a); \ | ||
442 | *(uint8_t *)(a) = (v); \ | ||
443 | o; }) | ||
444 | #endif /* xchg8 */ | ||
445 | #ifndef xchg32 | ||
446 | /* not atomic */ | ||
447 | #define xchg32(a, v, ...) \ | ||
448 | ({ uint32_t o = *(uint32_t *)(a); \ | ||
449 | *(uint32_t *)(a) = (v); \ | ||
450 | o; }) | ||
451 | #endif /* xchg32 */ | ||
452 | #ifndef xchgptr | ||
453 | /* not atomic */ | ||
454 | #define xchgptr(a, v, ...) \ | ||
455 | ({ typeof (*(a)) o = *(a); \ | ||
456 | *(a) = (v); \ | ||
457 | o; }) | ||
458 | #endif /* xchgptr */ | ||
199 | 459 | ||
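
These exchange primitives all share one contract: store a new value at the address and hand back the old one, atomically when the hardware allows it. A minimal, self-contained sketch of how that contract yields a lock; the plain-C my_test_and_set below only models the semantics of the macros above and is itself not atomic:

    #include <stdint.h>

    /* NOT atomic - a stand-in model for the asm/corelock versions above */
    static uint32_t my_test_and_set(uint8_t *a, uint8_t v)
    {
        uint32_t old = *a;
        *a = v;
        return old;
    }

    static uint8_t lock_word = 0;

    static void toy_lock(void)
    {
        /* spin until this caller is the one who flipped 0 -> 1 */
        while (my_test_and_set(&lock_word, 1) != 0)
            ;
    }

    static void toy_unlock(void)
    {
        lock_word = 0;
    }
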
460 | void core_idle(void); | ||
461 | void core_wake(IF_COP_VOID(unsigned int core)); | ||
462 | |||
463 | #define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ | ||
200 | struct thread_entry* | 464 | struct thread_entry* |
201 | create_thread(void (*function)(void), void* stack, int stack_size, | 465 | create_thread(void (*function)(void), void* stack, int stack_size, |
202 | const char *name IF_PRIO(, int priority) | 466 | unsigned flags, const char *name |
203 | IF_COP(, unsigned int core, bool fallback)); | 467 | IF_PRIO(, int priority) |
468 | IF_COP(, unsigned int core)); | ||
204 | 469 | ||
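
A hedged sketch of the new create_thread() signature in use, including the CREATE_THREAD_FROZEN flag paired with thread_thaw(); the thread name, entry function, and stack here are hypothetical, and the IF_PRIO/IF_COP arguments follow the pattern used elsewhere in this diff:

    static long demo_stack[DEFAULT_STACK_SIZE/sizeof(long)];
    static const char demo_thread_name[] = "demo"; /* hypothetical */

    static void demo_thread(void); /* hypothetical entry point */

    static void demo_init(void)
    {
        /* create frozen so shared state can be set up first */
        struct thread_entry *t =
            create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                          CREATE_THREAD_FROZEN, demo_thread_name
                          IF_PRIO(, PRIORITY_SYSTEM)
                          IF_COP(, CPU));

        /* ... initialize whatever demo_thread reads at startup ... */

        thread_thaw(t); /* the thread only starts running here */
    }
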
205 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 470 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
206 | void trigger_cpu_boost(void); | 471 | void trigger_cpu_boost(void); |
207 | #else | 472 | #else |
208 | #define trigger_cpu_boost() | 473 | #define trigger_cpu_boost() |
209 | #endif | 474 | #endif |
210 | 475 | void thread_thaw(struct thread_entry *thread); | |
476 | void thread_wait(struct thread_entry *thread); | ||
211 | void remove_thread(struct thread_entry *thread); | 477 | void remove_thread(struct thread_entry *thread); |
212 | void switch_thread(bool save_context, struct thread_entry **blocked_list); | 478 | void switch_thread(struct thread_entry *old); |
213 | void sleep_thread(int ticks); | 479 | void sleep_thread(int ticks); |
214 | void block_thread(struct thread_entry **thread); | 480 | |
215 | void block_thread_w_tmo(struct thread_entry **thread, int timeout); | 481 | /** |
216 | void set_irq_level_and_block_thread(struct thread_entry **thread, int level); | 482 | * Setup to allow using thread queues as locked or non-locked without speed |
217 | void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list, | 483 | * sacrifices in both core locking types. |
218 | int timeout, int level); | 484 | * |
219 | void wakeup_thread(struct thread_entry **thread); | 485 | * The blocking/waking functions inline two different versions of the real |
220 | void wakeup_thread_irq_safe(struct thread_entry **thread); | 486 | * function into the stubs when a software or other separate core locking |
487 | * mechanism is employed. | ||
488 | * | ||
489 | * When a simple test-and-set or similar instruction is available, locking | ||
490 | * has no cost and so one version is used and the internal worker is called | ||
491 | * directly. | ||
492 | * | ||
493 | * CORELOCK_NONE is treated the same as when an atomic instruction can be | ||
494 | * used. | ||
495 | */ | ||
496 | |||
497 | /* Blocks the current thread on a thread queue */ | ||
498 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
499 | void block_thread(struct thread_queue *tq); | ||
500 | void block_thread_no_listlock(struct thread_entry **list); | ||
501 | #else | ||
502 | void _block_thread(struct thread_queue *tq); | ||
503 | static inline void block_thread(struct thread_queue *tq) | ||
504 | { _block_thread(tq); } | ||
505 | static inline void block_thread_no_listlock(struct thread_entry **list) | ||
506 | { _block_thread((struct thread_queue *)list); } | ||
507 | #endif /* CONFIG_CORELOCK */ | ||
508 | |||
509 | /* Blocks the current thread on a thread queue for a max amount of time | ||
510 | * There is no "_no_listlock" version because timeout blocking without sync | ||
511 | * on the blocking queues is not permitted, since either core could access the | ||
512 | * list at any time to do an implicit wake. In other words, objects with | ||
513 | * timeout support require lockable queues. */ | ||
514 | void block_thread_w_tmo(struct thread_queue *tq, int timeout); | ||
515 | |||
516 | /* Wakes up the thread at the head of the queue */ | ||
517 | #define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL) | ||
518 | #define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1)) | ||
519 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
520 | struct thread_entry * wakeup_thread(struct thread_queue *tq); | ||
521 | struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list); | ||
522 | #else | ||
523 | struct thread_entry * _wakeup_thread(struct thread_queue *list); | ||
524 | static inline struct thread_entry * wakeup_thread(struct thread_queue *tq) | ||
525 | { return _wakeup_thread(tq); } | ||
526 | static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list) | ||
527 | { return _wakeup_thread((struct thread_queue *)list); } | ||
528 | #endif /* CONFIG_CORELOCK */ | ||
529 | |||
530 | /* Initialize a thread_queue object. */ | ||
531 | static inline void thread_queue_init(struct thread_queue *tq) | ||
532 | { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) } | ||
533 | /* A convenience function for waking an entire queue of threads. */ | ||
534 | static inline void thread_queue_wake(struct thread_queue *tq) | ||
535 | { while (wakeup_thread(tq) != NULL); } | ||
536 | /* The no-listlock version of thread_queue_wake() */ | ||
537 | static inline void thread_queue_wake_no_listlock(struct thread_entry **list) | ||
538 | { while (wakeup_thread_no_listlock(list) != NULL); } | ||
539 | |||
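
A sketch of the simplest use of these primitives, assuming kernel context with interrupts managed by the caller: one bare waiter list handled through the _no_listlock variants, as an owning object would do when it provides its own locking. The names are hypothetical:

    static struct thread_entry *waiter = NULL; /* hypothetical list head */

    static void wait_for_poke(void)
    {
        block_thread_no_listlock(&waiter); /* sleep until woken */
    }

    static void poke(void)
    {
        /* returns the woken thread, or NULL if nobody was waiting */
        wakeup_thread_no_listlock(&waiter);
    }
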
221 | #ifdef HAVE_PRIORITY_SCHEDULING | 540 | #ifdef HAVE_PRIORITY_SCHEDULING |
222 | int thread_set_priority(struct thread_entry *thread, int priority); | 541 | int thread_set_priority(struct thread_entry *thread, int priority); |
223 | int thread_get_priority(struct thread_entry *thread); | 542 | int thread_get_priority(struct thread_entry *thread); |
224 | /* Yield that guarantees thread execution once per round regardless of | 543 | /* Yield that guarantees thread execution once per round regardless of |
225 | thread's scheduler priority - basically a transient realtime boost | 544 | thread's scheduler priority - basically a transient realtime boost |
226 | without altering the scheduler's thread precedence. */ | 545 | without altering the scheduler's thread precedence. */ |
@@ -228,17 +547,20 @@ void priority_yield(void); | |||
228 | #else | 547 | #else |
229 | #define priority_yield yield | 548 | #define priority_yield yield |
230 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 549 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
550 | #if NUM_CORES > 1 | ||
551 | unsigned int switch_core(unsigned int new_core); | ||
552 | #endif | ||
231 | struct thread_entry * thread_get_current(void); | 553 | struct thread_entry * thread_get_current(void); |
232 | void init_threads(void); | 554 | void init_threads(void); |
233 | int thread_stack_usage(const struct thread_entry *thread); | 555 | int thread_stack_usage(const struct thread_entry *thread); |
234 | #if NUM_CORES > 1 | 556 | #if NUM_CORES > 1 |
235 | int idle_stack_usage(unsigned int core); | 557 | int idle_stack_usage(unsigned int core); |
236 | #endif | 558 | #endif |
237 | int thread_get_status(const struct thread_entry *thread); | 559 | unsigned thread_get_status(const struct thread_entry *thread); |
238 | void thread_get_name(char *buffer, int size, | 560 | void thread_get_name(char *buffer, int size, |
239 | struct thread_entry *thread); | 561 | struct thread_entry *thread); |
240 | #ifdef RB_PROFILE | 562 | #ifdef RB_PROFILE |
241 | void profile_thread(void); | 563 | void profile_thread(void); |
242 | #endif | 564 | #endif |
243 | 565 | ||
244 | #endif | 566 | #endif /* THREAD_H */ |
diff --git a/firmware/kernel.c b/firmware/kernel.c index 1b6e9f933b..4e56c2919a 100644 --- a/firmware/kernel.c +++ b/firmware/kernel.c | |||
@@ -28,15 +28,37 @@ | |||
28 | #include "avic-imx31.h" | 28 | #include "avic-imx31.h" |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* Make this nonzero to enable more elaborate checks on objects */ | ||
32 | #ifdef DEBUG | ||
33 | #define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */ | ||
34 | #else | ||
35 | #define KERNEL_OBJECT_CHECKS 0 | ||
36 | #endif | ||
37 | |||
38 | #if KERNEL_OBJECT_CHECKS | ||
39 | #define KERNEL_ASSERT(exp, msg...) \ | ||
40 | ({ if (!({ exp; })) panicf(msg); }) | ||
41 | #else | ||
42 | #define KERNEL_ASSERT(exp, msg...) ({}) | ||
43 | #endif | ||
44 | |||
31 | #if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER) | 45 | #if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER) |
32 | volatile long current_tick NOCACHEDATA_ATTR = 0; | 46 | volatile long current_tick NOCACHEDATA_ATTR = 0; |
33 | #endif | 47 | #endif |
34 | 48 | ||
35 | void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); | 49 | void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); |
36 | 50 | ||
51 | extern struct core_entry cores[NUM_CORES]; | ||
52 | |||
37 | /* This array holds all queues that are initiated. It is used for broadcast. */ | 53 | /* This array holds all queues that are initiated. It is used for broadcast. */ |
38 | static struct event_queue *all_queues[32] NOCACHEBSS_ATTR; | 54 | static struct |
39 | static int num_queues NOCACHEBSS_ATTR; | 55 | { |
56 | int count; | ||
57 | struct event_queue *queues[MAX_NUM_QUEUES]; | ||
58 | #if NUM_CORES > 1 | ||
59 | struct corelock cl; | ||
60 | #endif | ||
61 | } all_queues NOCACHEBSS_ATTR; | ||
40 | 62 | ||
41 | /**************************************************************************** | 63 | /**************************************************************************** |
42 | * Standard kernel stuff | 64 | * Standard kernel stuff |
@@ -52,8 +74,8 @@ void kernel_init(void) | |||
52 | if (CURRENT_CORE == CPU) | 74 | if (CURRENT_CORE == CPU) |
53 | { | 75 | { |
54 | memset(tick_funcs, 0, sizeof(tick_funcs)); | 76 | memset(tick_funcs, 0, sizeof(tick_funcs)); |
55 | num_queues = 0; | 77 | memset(&all_queues, 0, sizeof(all_queues)); |
56 | memset(all_queues, 0, sizeof(all_queues)); | 78 | corelock_init(&all_queues.cl); |
57 | tick_start(1000/HZ); | 79 | tick_start(1000/HZ); |
58 | } | 80 | } |
59 | } | 81 | } |
@@ -77,7 +99,7 @@ void sleep(int ticks) | |||
77 | #elif defined(CPU_PP) && defined(BOOTLOADER) | 99 | #elif defined(CPU_PP) && defined(BOOTLOADER) |
78 | unsigned stop = USEC_TIMER + ticks * (1000000/HZ); | 100 | unsigned stop = USEC_TIMER + ticks * (1000000/HZ); |
79 | while (TIME_BEFORE(USEC_TIMER, stop)) | 101 | while (TIME_BEFORE(USEC_TIMER, stop)) |
80 | switch_thread(true,NULL); | 102 | switch_thread(NULL); |
81 | #else | 103 | #else |
82 | sleep_thread(ticks); | 104 | sleep_thread(ticks); |
83 | #endif | 105 | #endif |
@@ -88,7 +110,7 @@ void yield(void) | |||
88 | #if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER)) | 110 | #if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER)) |
89 | /* Some targets don't like yielding in the bootloader */ | 111 | /* Some targets don't like yielding in the bootloader */ |
90 | #else | 112 | #else |
91 | switch_thread(true, NULL); | 113 | switch_thread(NULL); |
92 | #endif | 114 | #endif |
93 | } | 115 | } |
94 | 116 | ||
@@ -104,7 +126,7 @@ static void queue_fetch_sender(struct queue_sender_list *send, | |||
104 | { | 126 | { |
105 | struct thread_entry **spp = &send->senders[i]; | 127 | struct thread_entry **spp = &send->senders[i]; |
106 | 128 | ||
107 | if (*spp) | 129 | if(*spp) |
108 | { | 130 | { |
109 | send->curr_sender = *spp; | 131 | send->curr_sender = *spp; |
110 | *spp = NULL; | 132 | *spp = NULL; |
@@ -124,18 +146,16 @@ static void queue_release_sender(struct thread_entry **sender, | |||
124 | intptr_t retval) | 146 | intptr_t retval) |
125 | { | 147 | { |
126 | (*sender)->retval = retval; | 148 | (*sender)->retval = retval; |
127 | wakeup_thread_irq_safe(sender); | 149 | wakeup_thread_no_listlock(sender); |
128 | #if 0 | ||
129 | /* This should _never_ happen - there must never be multiple | 150 | /* This should _never_ happen - there must never be multiple |
130 | threads in this list and it is a corrupt state */ | 151 | threads in this list and it is a corrupt state */ |
131 | if (*sender != NULL) | 152 | KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender); |
132 | panicf("Queue: send slot ovf"); | ||
133 | #endif | ||
134 | } | 153 | } |
135 | 154 | ||
136 | /* Releases any waiting threads that are queued with queue_send - | 155 | /* Releases any waiting threads that are queued with queue_send - |
137 | * reply with 0. | 156 | * reply with 0. |
138 | * Disable IRQs before calling since it uses queue_release_sender. | 157 | * Disable IRQs and lock before calling since it uses |
158 | * queue_release_sender. | ||
139 | */ | 159 | */ |
140 | static void queue_release_all_senders(struct event_queue *q) | 160 | static void queue_release_all_senders(struct event_queue *q) |
141 | { | 161 | { |
@@ -156,79 +176,114 @@ static void queue_release_all_senders(struct event_queue *q) | |||
156 | } | 176 | } |
157 | 177 | ||
158 | /* Enables queue_send on the specified queue - caller allocates the extra | 178 | /* Enables queue_send on the specified queue - caller allocates the extra |
159 | data structure */ | 179 | data structure. Only queues which are taken to be owned by a thread should |
180 | enable this. Public waiting is not permitted. */ | ||
160 | void queue_enable_queue_send(struct event_queue *q, | 181 | void queue_enable_queue_send(struct event_queue *q, |
161 | struct queue_sender_list *send) | 182 | struct queue_sender_list *send) |
162 | { | 183 | { |
163 | q->send = send; | 184 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
164 | memset(send, 0, sizeof(struct queue_sender_list)); | 185 | corelock_lock(&q->cl); |
186 | |||
187 | q->send = NULL; | ||
188 | if(send != NULL) | ||
189 | { | ||
190 | memset(send, 0, sizeof(*send)); | ||
191 | q->send = send; | ||
192 | } | ||
193 | |||
194 | corelock_unlock(&q->cl); | ||
195 | set_irq_level(oldlevel); | ||
165 | } | 196 | } |
166 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ | 197 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ |
167 | 198 | ||
168 | 199 | /* Queue must not be available for use during this call */ | |
169 | void queue_init(struct event_queue *q, bool register_queue) | 200 | void queue_init(struct event_queue *q, bool register_queue) |
170 | { | 201 | { |
202 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
203 | |||
204 | if(register_queue) | ||
205 | { | ||
206 | corelock_lock(&all_queues.cl); | ||
207 | } | ||
208 | |||
209 | corelock_init(&q->cl); | ||
210 | thread_queue_init(&q->queue); | ||
171 | q->read = 0; | 211 | q->read = 0; |
172 | q->write = 0; | 212 | q->write = 0; |
173 | q->thread = NULL; | ||
174 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 213 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
175 | q->send = NULL; /* No message sending by default */ | 214 | q->send = NULL; /* No message sending by default */ |
176 | #endif | 215 | #endif |
177 | 216 | ||
178 | if(register_queue) | 217 | if(register_queue) |
179 | { | 218 | { |
219 | if(all_queues.count >= MAX_NUM_QUEUES) | ||
220 | { | ||
221 | panicf("queue_init->out of queues"); | ||
222 | } | ||
180 | /* Add it to the all_queues array */ | 223 | /* Add it to the all_queues array */ |
181 | all_queues[num_queues++] = q; | 224 | all_queues.queues[all_queues.count++] = q; |
225 | corelock_unlock(&all_queues.cl); | ||
182 | } | 226 | } |
227 | |||
228 | set_irq_level(oldlevel); | ||
183 | } | 229 | } |
184 | 230 | ||
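
A sketch of the registration side of queue_init(), assuming kernel.h: passing true takes one of the MAX_NUM_QUEUES slots in all_queues (panicking if none is free) and makes the queue a queue_broadcast() recipient; queue_delete() later wakes any waiters and releases the slot. The queue name is hypothetical:

    static struct event_queue demo_queue; /* hypothetical */

    void demo_open(void)
    {
        queue_init(&demo_queue, true);  /* registered for broadcasts */
    }

    void demo_close(void)
    {
        queue_delete(&demo_queue);      /* wakes waiters, unregisters */
    }
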
231 | /* Queue must not be available for use during this call */ | ||
185 | void queue_delete(struct event_queue *q) | 232 | void queue_delete(struct event_queue *q) |
186 | { | 233 | { |
234 | int oldlevel; | ||
187 | int i; | 235 | int i; |
188 | bool found = false; | ||
189 | |||
190 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
191 | 236 | ||
192 | /* Release threads waiting on queue */ | 237 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
193 | wakeup_thread(&q->thread); | 238 | corelock_lock(&all_queues.cl); |
239 | corelock_lock(&q->cl); | ||
194 | 240 | ||
195 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | ||
196 | /* Release waiting threads and reply to any dequeued message | ||
197 | waiting for one. */ | ||
198 | queue_release_all_senders(q); | ||
199 | queue_reply(q, 0); | ||
200 | #endif | ||
201 | |||
202 | /* Find the queue to be deleted */ | 241 | /* Find the queue to be deleted */ |
203 | for(i = 0;i < num_queues;i++) | 242 | for(i = 0;i < all_queues.count;i++) |
204 | { | 243 | { |
205 | if(all_queues[i] == q) | 244 | if(all_queues.queues[i] == q) |
206 | { | 245 | { |
207 | found = true; | 246 | /* Move the following queues up in the list */ |
247 | all_queues.count--; | ||
248 | |||
249 | for(;i < all_queues.count;i++) | ||
250 | { | ||
251 | all_queues.queues[i] = all_queues.queues[i+1]; | ||
252 | } | ||
253 | |||
208 | break; | 254 | break; |
209 | } | 255 | } |
210 | } | 256 | } |
211 | 257 | ||
212 | if(found) | 258 | corelock_unlock(&all_queues.cl); |
213 | { | 259 | |
214 | /* Move the following queues up in the list */ | 260 | /* Release threads waiting on queue head */ |
215 | for(;i < num_queues-1;i++) | 261 | thread_queue_wake(&q->queue); |
216 | { | 262 | |
217 | all_queues[i] = all_queues[i+1]; | 263 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
218 | } | 264 | /* Release waiting threads for reply and reply to any dequeued |
219 | 265 | message waiting for one. */ | |
220 | num_queues--; | 266 | queue_release_all_senders(q); |
221 | } | 267 | queue_reply(q, 0); |
222 | 268 | #endif | |
269 | |||
270 | q->read = 0; | ||
271 | q->write = 0; | ||
272 | |||
273 | corelock_unlock(&q->cl); | ||
223 | set_irq_level(oldlevel); | 274 | set_irq_level(oldlevel); |
224 | } | 275 | } |
225 | 276 | ||
226 | void queue_wait(struct event_queue *q, struct event *ev) | 277 | /* NOTE: multiple threads waiting on a queue head cannot have a well- |
278 | defined release order if timeouts are used. If multiple threads must | ||
279 | access the queue head, use a dispatcher or queue_wait only. */ | ||
280 | void queue_wait(struct event_queue *q, struct queue_event *ev) | ||
227 | { | 281 | { |
228 | int oldlevel; | 282 | int oldlevel; |
229 | unsigned int rd; | 283 | unsigned int rd; |
230 | 284 | ||
231 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 285 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
286 | corelock_lock(&q->cl); | ||
232 | 287 | ||
233 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 288 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
234 | if(q->send && q->send->curr_sender) | 289 | if(q->send && q->send->curr_sender) |
@@ -240,8 +295,28 @@ void queue_wait(struct event_queue *q, struct event *ev) | |||
240 | 295 | ||
241 | if (q->read == q->write) | 296 | if (q->read == q->write) |
242 | { | 297 | { |
243 | set_irq_level_and_block_thread(&q->thread, oldlevel); | 298 | do |
244 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 299 | { |
300 | #if CONFIG_CORELOCK == CORELOCK_NONE | ||
301 | cores[CURRENT_CORE].irq_level = oldlevel; | ||
302 | #elif CONFIG_CORELOCK == SW_CORELOCK | ||
303 | const unsigned int core = CURRENT_CORE; | ||
304 | cores[core].blk_ops.irq_level = oldlevel; | ||
305 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL; | ||
306 | cores[core].blk_ops.cl_p = &q->cl; | ||
307 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
308 | const unsigned int core = CURRENT_CORE; | ||
309 | cores[core].blk_ops.irq_level = oldlevel; | ||
310 | cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL; | ||
311 | cores[core].blk_ops.var_u8p = &q->cl.locked; | ||
312 | cores[core].blk_ops.var_u8v = 0; | ||
313 | #endif /* CONFIG_CORELOCK */ | ||
314 | block_thread(&q->queue); | ||
315 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
316 | corelock_lock(&q->cl); | ||
317 | } | ||
318 | /* A message that woke us could now be gone */ | ||
319 | while (q->read == q->write); | ||
245 | } | 320 | } |
246 | 321 | ||
247 | rd = q->read++ & QUEUE_LENGTH_MASK; | 322 | rd = q->read++ & QUEUE_LENGTH_MASK; |
@@ -254,13 +329,17 @@ void queue_wait(struct event_queue *q, struct event *ev) | |||
254 | queue_fetch_sender(q->send, rd); | 329 | queue_fetch_sender(q->send, rd); |
255 | } | 330 | } |
256 | #endif | 331 | #endif |
257 | 332 | ||
333 | corelock_unlock(&q->cl); | ||
258 | set_irq_level(oldlevel); | 334 | set_irq_level(oldlevel); |
259 | } | 335 | } |
260 | 336 | ||
261 | void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) | 337 | void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) |
262 | { | 338 | { |
263 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 339 | int oldlevel; |
340 | |||
341 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
342 | corelock_lock(&q->cl); | ||
264 | 343 | ||
265 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 344 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
266 | if (q->send && q->send->curr_sender) | 345 | if (q->send && q->send->curr_sender) |
@@ -269,13 +348,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) | |||
269 | queue_release_sender(&q->send->curr_sender, 0); | 348 | queue_release_sender(&q->send->curr_sender, 0); |
270 | } | 349 | } |
271 | #endif | 350 | #endif |
272 | 351 | ||
273 | if (q->read == q->write && ticks > 0) | 352 | if (q->read == q->write && ticks > 0) |
274 | { | 353 | { |
275 | set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel); | 354 | #if CONFIG_CORELOCK == CORELOCK_NONE |
355 | cores[CURRENT_CORE].irq_level = oldlevel; | ||
356 | #elif CONFIG_CORELOCK == SW_CORELOCK | ||
357 | const unsigned int core = CURRENT_CORE; | ||
358 | cores[core].blk_ops.irq_level = oldlevel; | ||
359 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL; | ||
360 | cores[core].blk_ops.cl_p = &q->cl; | ||
361 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
362 | const unsigned int core = CURRENT_CORE; | ||
363 | cores[core].blk_ops.irq_level = oldlevel; | ||
364 | cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL; | ||
365 | cores[core].blk_ops.var_u8p = &q->cl.locked; | ||
366 | cores[core].blk_ops.var_u8v = 0; | ||
367 | #endif | ||
368 | block_thread_w_tmo(&q->queue, ticks); | ||
276 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 369 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
370 | corelock_lock(&q->cl); | ||
277 | } | 371 | } |
278 | 372 | ||
373 | /* no worry about a removed message here - status is checked inside | ||
374 | locks - perhaps verify if timeout or false alarm */ | ||
279 | if (q->read != q->write) | 375 | if (q->read != q->write) |
280 | { | 376 | { |
281 | unsigned int rd = q->read++ & QUEUE_LENGTH_MASK; | 377 | unsigned int rd = q->read++ & QUEUE_LENGTH_MASK; |
@@ -293,15 +389,19 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) | |||
293 | { | 389 | { |
294 | ev->id = SYS_TIMEOUT; | 390 | ev->id = SYS_TIMEOUT; |
295 | } | 391 | } |
296 | 392 | ||
393 | corelock_unlock(&q->cl); | ||
297 | set_irq_level(oldlevel); | 394 | set_irq_level(oldlevel); |
298 | } | 395 | } |
299 | 396 | ||
300 | void queue_post(struct event_queue *q, long id, intptr_t data) | 397 | void queue_post(struct event_queue *q, long id, intptr_t data) |
301 | { | 398 | { |
302 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 399 | int oldlevel; |
303 | unsigned int wr; | 400 | unsigned int wr; |
304 | 401 | ||
402 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
403 | corelock_lock(&q->cl); | ||
404 | |||
305 | wr = q->write++ & QUEUE_LENGTH_MASK; | 405 | wr = q->write++ & QUEUE_LENGTH_MASK; |
306 | 406 | ||
307 | q->events[wr].id = id; | 407 | q->events[wr].id = id; |
@@ -320,20 +420,24 @@ void queue_post(struct event_queue *q, long id, intptr_t data) | |||
320 | } | 420 | } |
321 | #endif | 421 | #endif |
322 | 422 | ||
323 | wakeup_thread_irq_safe(&q->thread); | 423 | /* Wakeup a waiting thread if any */ |
424 | wakeup_thread(&q->queue); | ||
425 | |||
426 | corelock_unlock(&q->cl); | ||
324 | set_irq_level(oldlevel); | 427 | set_irq_level(oldlevel); |
325 | |||
326 | } | 428 | } |
327 | 429 | ||
328 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 430 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
329 | /* No wakeup_thread_irq_safe here because IRQ handlers are not allowed | 431 | /* IRQ handlers are not allowed use of this function - we only aim to |
330 | use of this function - we only aim to protect the queue integrity by | 432 | protect the queue integrity by turning them off. */ |
331 | turning them off. */ | ||
332 | intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | 433 | intptr_t queue_send(struct event_queue *q, long id, intptr_t data) |
333 | { | 434 | { |
334 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 435 | int oldlevel; |
335 | unsigned int wr; | 436 | unsigned int wr; |
336 | 437 | ||
438 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
439 | corelock_lock(&q->cl); | ||
440 | |||
337 | wr = q->write++ & QUEUE_LENGTH_MASK; | 441 | wr = q->write++ & QUEUE_LENGTH_MASK; |
338 | 442 | ||
339 | q->events[wr].id = id; | 443 | q->events[wr].id = id; |
@@ -341,21 +445,38 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
341 | 445 | ||
342 | if(q->send) | 446 | if(q->send) |
343 | { | 447 | { |
448 | const unsigned int core = CURRENT_CORE; | ||
344 | struct thread_entry **spp = &q->send->senders[wr]; | 449 | struct thread_entry **spp = &q->send->senders[wr]; |
345 | 450 | ||
346 | if (*spp) | 451 | if(*spp) |
347 | { | 452 | { |
348 | /* overflow protect - unblock any thread waiting at this index */ | 453 | /* overflow protect - unblock any thread waiting at this index */ |
349 | queue_release_sender(spp, 0); | 454 | queue_release_sender(spp, 0); |
350 | } | 455 | } |
351 | 456 | ||
352 | wakeup_thread(&q->thread); | 457 | /* Wakeup a waiting thread if any */ |
353 | set_irq_level_and_block_thread(spp, oldlevel); | 458 | wakeup_thread(&q->queue); |
354 | return thread_get_current()->retval; | 459 | |
460 | #if CONFIG_CORELOCK == CORELOCK_NONE | ||
461 | cores[core].irq_level = oldlevel; | ||
462 | #elif CONFIG_CORELOCK == SW_CORELOCK | ||
463 | cores[core].blk_ops.irq_level = oldlevel; | ||
464 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL; | ||
465 | cores[core].blk_ops.cl_p = &q->cl; | ||
466 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
467 | cores[core].blk_ops.irq_level = oldlevel; | ||
468 | cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL; | ||
469 | cores[core].blk_ops.var_u8p = &q->cl.locked; | ||
470 | cores[core].blk_ops.var_u8v = 0; | ||
471 | #endif | ||
472 | block_thread_no_listlock(spp); | ||
473 | return cores[core].running->retval; | ||
355 | } | 474 | } |
356 | 475 | ||
357 | /* Function as queue_post if sending is not enabled */ | 476 | /* Function as queue_post if sending is not enabled */ |
358 | wakeup_thread(&q->thread); | 477 | wakeup_thread(&q->queue); |
478 | |||
479 | corelock_unlock(&q->cl); | ||
359 | set_irq_level(oldlevel); | 480 | set_irq_level(oldlevel); |
360 | 481 | ||
361 | return 0; | 482 | return 0; |
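
A sketch of the synchronous round trip queue_send() provides once the owner has called queue_enable_queue_send(), assuming kernel.h; the message id and the +1 reply value are hypothetical:

    #define DEMO_MSG 1 /* hypothetical id */

    /* client side - blocks until the owner replies */
    intptr_t ask_owner(struct event_queue *q, intptr_t data)
    {
        return queue_send(q, DEMO_MSG, data);
    }

    /* owner side - the only thread allowed to wait on q */
    void owner_loop(struct event_queue *q)
    {
        struct queue_event ev;
        while (1)
        {
            queue_wait(q, &ev);
            if (ev.id == DEMO_MSG)
                queue_reply(q, ev.data + 1); /* unblocks the sender */
        }
    }
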
@@ -365,21 +486,52 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
365 | /* Query if the last message dequeued was added by queue_send or not */ | 486 | /* Query if the last message dequeued was added by queue_send or not */ |
366 | bool queue_in_queue_send(struct event_queue *q) | 487 | bool queue_in_queue_send(struct event_queue *q) |
367 | { | 488 | { |
368 | return q->send && q->send->curr_sender; | 489 | bool in_send; |
490 | |||
491 | #if NUM_CORES > 1 | ||
492 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
493 | corelock_lock(&q->cl); | ||
494 | #endif | ||
495 | |||
496 | in_send = q->send && q->send->curr_sender; | ||
497 | |||
498 | #if NUM_CORES > 1 | ||
499 | corelock_unlock(&q->cl); | ||
500 | set_irq_level(oldlevel); | ||
501 | #endif | ||
502 | |||
503 | return in_send; | ||
369 | } | 504 | } |
370 | #endif | 505 | #endif |
371 | 506 | ||
372 | /* Replies with retval to any dequeued message sent with queue_send */ | 507 | /* Replies with retval to the last dequeued message sent with queue_send */ |
373 | void queue_reply(struct event_queue *q, intptr_t retval) | 508 | void queue_reply(struct event_queue *q, intptr_t retval) |
374 | { | 509 | { |
375 | /* No IRQ lock here since IRQs cannot change this */ | ||
376 | if(q->send && q->send->curr_sender) | 510 | if(q->send && q->send->curr_sender) |
377 | { | 511 | { |
378 | queue_release_sender(&q->send->curr_sender, retval); | 512 | #if NUM_CORES > 1 |
513 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
514 | corelock_lock(&q->cl); | ||
515 | /* Double-check locking */ | ||
516 | if(q->send && q->send->curr_sender) | ||
517 | { | ||
518 | #endif | ||
519 | |||
520 | queue_release_sender(&q->send->curr_sender, retval); | ||
521 | |||
522 | #if NUM_CORES > 1 | ||
523 | } | ||
524 | corelock_unlock(&q->cl); | ||
525 | set_irq_level(oldlevel); | ||
526 | #endif | ||
379 | } | 527 | } |
380 | } | 528 | } |
381 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ | 529 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ |
382 | 530 | ||
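
queue_reply() above uses a double-checked test: a cheap unlocked pre-test, then a re-test under the corelock before acting, since the sender slot can be emptied between the two. The same shape, self-contained with POSIX threads as an analogy (strictly portable code would read the flag with atomics; the unlocked peek mirrors the kernel's own shortcut):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static void *pending = NULL;

    void reply_if_pending(void)
    {
        if (pending != NULL)          /* fast path: no lock taken */
        {
            pthread_mutex_lock(&m);
            if (pending != NULL)      /* re-check: it may be gone now */
            {
                /* ... deliver the reply ... */
                pending = NULL;
            }
            pthread_mutex_unlock(&m);
        }
    }
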
531 | /* Poll queue to see if a message exists - careful in using the result if | ||
532 | * queue_remove_from_head is called when messages are posted - possibly use | ||
533 | * queue_wait_w_tmo(&q, 0) in that case or else a removed message that | ||
534 | * unsignals the queue may cause an unwanted block */ | ||
383 | bool queue_empty(const struct event_queue* q) | 535 | bool queue_empty(const struct event_queue* q) |
384 | { | 536 | { |
385 | return ( q->read == q->write ); | 537 | return ( q->read == q->write ); |
@@ -387,23 +539,30 @@ bool queue_empty(const struct event_queue* q) | |||
387 | 539 | ||
388 | void queue_clear(struct event_queue* q) | 540 | void queue_clear(struct event_queue* q) |
389 | { | 541 | { |
390 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 542 | int oldlevel; |
543 | |||
544 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
545 | corelock_lock(&q->cl); | ||
391 | 546 | ||
392 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 547 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
393 | /* Release all thread waiting in the queue for a reply - | 548 | /* Release all threads waiting in the queue for a reply - |
394 | dequeued sent message will be handled by owning thread */ | 549 | dequeued sent message will be handled by owning thread */ |
395 | queue_release_all_senders(q); | 550 | queue_release_all_senders(q); |
396 | #endif | 551 | #endif |
397 | 552 | ||
398 | q->read = 0; | 553 | q->read = 0; |
399 | q->write = 0; | 554 | q->write = 0; |
400 | 555 | ||
556 | corelock_unlock(&q->cl); | ||
401 | set_irq_level(oldlevel); | 557 | set_irq_level(oldlevel); |
402 | } | 558 | } |
403 | 559 | ||
404 | void queue_remove_from_head(struct event_queue *q, long id) | 560 | void queue_remove_from_head(struct event_queue *q, long id) |
405 | { | 561 | { |
406 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 562 | int oldlevel; |
563 | |||
564 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
565 | corelock_lock(&q->cl); | ||
407 | 566 | ||
408 | while(q->read != q->write) | 567 | while(q->read != q->write) |
409 | { | 568 | { |
@@ -428,7 +587,8 @@ void queue_remove_from_head(struct event_queue *q, long id) | |||
428 | #endif | 587 | #endif |
429 | q->read++; | 588 | q->read++; |
430 | } | 589 | } |
431 | 590 | ||
591 | corelock_unlock(&q->cl); | ||
432 | set_irq_level(oldlevel); | 592 | set_irq_level(oldlevel); |
433 | } | 593 | } |
434 | 594 | ||
@@ -446,13 +606,23 @@ int queue_count(const struct event_queue *q) | |||
446 | int queue_broadcast(long id, intptr_t data) | 606 | int queue_broadcast(long id, intptr_t data) |
447 | { | 607 | { |
448 | int i; | 608 | int i; |
609 | |||
610 | #if NUM_CORES > 1 | ||
611 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
612 | corelock_lock(&all_queues.cl); | ||
613 | #endif | ||
449 | 614 | ||
450 | for(i = 0;i < num_queues;i++) | 615 | for(i = 0;i < all_queues.count;i++) |
451 | { | 616 | { |
452 | queue_post(all_queues[i], id, data); | 617 | queue_post(all_queues.queues[i], id, data); |
453 | } | 618 | } |
619 | |||
620 | #if NUM_CORES > 1 | ||
621 | corelock_unlock(&all_queues.cl); | ||
622 | set_irq_level(oldlevel); | ||
623 | #endif | ||
454 | 624 | ||
455 | return num_queues; | 625 | return i; |
456 | } | 626 | } |
457 | 627 | ||
458 | /**************************************************************************** | 628 | /**************************************************************************** |
@@ -567,6 +737,7 @@ void TIMER1(void) | |||
567 | { | 737 | { |
568 | int i; | 738 | int i; |
569 | 739 | ||
740 | /* Run through the list of tick tasks (using main core) */ | ||
570 | TIMER1_VAL; /* Read value to ack IRQ */ | 741 | TIMER1_VAL; /* Read value to ack IRQ */ |
571 | 742 | ||
572 | /* Run through the list of tick tasks using main CPU core - | 743 | /* Run through the list of tick tasks using main CPU core - |
@@ -580,24 +751,8 @@ void TIMER1(void) | |||
580 | } | 751 | } |
581 | 752 | ||
582 | #if NUM_CORES > 1 | 753 | #if NUM_CORES > 1 |
583 | #ifdef CPU_PP502x | 754 | /* Pulse the COP */ |
584 | { | 755 | core_wake(COP); |
585 | /* If COP is sleeping - give it a kick */ | ||
586 | /* TODO: Use a mailbox in addition to make sure it doesn't go to | ||
587 | * sleep if kicked just as it's headed to rest to make sure its | ||
588 | * tick checks won't be jittery. Don't bother at all if it owns no | ||
589 | * threads. */ | ||
590 | unsigned int cop_ctl; | ||
591 | |||
592 | cop_ctl = COP_CTL; | ||
593 | if (cop_ctl & PROC_SLEEP) | ||
594 | { | ||
595 | COP_CTL = cop_ctl & ~PROC_SLEEP; | ||
596 | } | ||
597 | } | ||
598 | #else | ||
599 | /* TODO: PP5002 */ | ||
600 | #endif | ||
601 | #endif /* NUM_CORES */ | 756 | #endif /* NUM_CORES */ |
602 | 757 | ||
603 | current_tick++; | 758 | current_tick++; |
@@ -837,49 +992,391 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback, | |||
837 | 992 | ||
838 | #endif /* INCLUDE_TIMEOUT_API */ | 993 | #endif /* INCLUDE_TIMEOUT_API */ |
839 | 994 | ||
840 | #ifndef SIMULATOR | ||
841 | /* | ||
842 | * Simulator versions in uisimulator/SIMVER/ | ||
843 | */ | ||
844 | |||
845 | /**************************************************************************** | 995 | /**************************************************************************** |
846 | * Simple mutex functions | 996 | * Simple mutex functions ;) |
847 | ****************************************************************************/ | 997 | ****************************************************************************/ |
848 | void mutex_init(struct mutex *m) | 998 | void mutex_init(struct mutex *m) |
849 | { | 999 | { |
850 | m->locked = false; | 1000 | m->queue = NULL; |
851 | m->thread = NULL; | 1001 | m->thread = NULL; |
1002 | m->count = 0; | ||
1003 | m->locked = 0; | ||
1004 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1005 | corelock_init(&m->cl); | ||
1006 | #endif | ||
852 | } | 1007 | } |
853 | 1008 | ||
854 | void mutex_lock(struct mutex *m) | 1009 | void mutex_lock(struct mutex *m) |
855 | { | 1010 | { |
856 | if (test_and_set(&m->locked, 1)) | 1011 | const unsigned int core = CURRENT_CORE; |
1012 | struct thread_entry *const thread = cores[core].running; | ||
1013 | |||
1014 | if(thread == m->thread) | ||
857 | { | 1015 | { |
858 | /* Wait until the lock is open... */ | 1016 | m->count++; |
859 | block_thread(&m->thread); | 1017 | return; |
860 | } | 1018 | } |
1019 | |||
1020 | /* Repeat some stuff here or else all the variation is too difficult to | ||
1021 | read */ | ||
1022 | #if CONFIG_CORELOCK == CORELOCK_SWAP | ||
1023 | /* peek at lock until it's no longer busy */ | ||
1024 | unsigned int locked; | ||
1025 | while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8); | ||
1026 | if(locked == 0) | ||
1027 | { | ||
1028 | m->thread = thread; | ||
1029 | m->locked = 1; | ||
1030 | return; | ||
1031 | } | ||
1032 | |||
1033 | /* Block until the lock is open... */ | ||
1034 | cores[core].blk_ops.flags = TBOP_SET_VARu8; | ||
1035 | cores[core].blk_ops.var_u8p = &m->locked; | ||
1036 | cores[core].blk_ops.var_u8v = 1; | ||
1037 | #else | ||
1038 | corelock_lock(&m->cl); | ||
1039 | if (m->locked == 0) | ||
1040 | { | ||
1041 | m->locked = 1; | ||
1042 | m->thread = thread; | ||
1043 | corelock_unlock(&m->cl); | ||
1044 | return; | ||
1045 | } | ||
1046 | |||
1047 | /* Block until the lock is open... */ | ||
1048 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1049 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; | ||
1050 | cores[core].blk_ops.cl_p = &m->cl; | ||
1051 | #endif | ||
1052 | #endif /* CONFIG_CORELOCK */ | ||
1053 | |||
1054 | block_thread_no_listlock(&m->queue); | ||
861 | } | 1055 | } |
862 | 1056 | ||
863 | void mutex_unlock(struct mutex *m) | 1057 | void mutex_unlock(struct mutex *m) |
864 | { | 1058 | { |
865 | if (m->thread == NULL) | 1059 | /* unlocker not being the owner is an unlocking violation */ |
866 | m->locked = 0; | 1060 | KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running, |
1061 | "mutex_unlock->wrong thread (recurse)"); | ||
1062 | |||
1063 | if(m->count > 0) | ||
1064 | { | ||
1065 | /* this thread still owns lock */ | ||
1066 | m->count--; | ||
1067 | return; | ||
1068 | } | ||
1069 | |||
1070 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1071 | /* lock out other cores */ | ||
1072 | corelock_lock(&m->cl); | ||
1073 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1074 | /* wait for peeker to move on */ | ||
1075 | while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8); | ||
1076 | #endif | ||
1077 | |||
1078 | /* transfer to next queued thread if any */ | ||
1079 | m->thread = wakeup_thread_no_listlock(&m->queue); | ||
1080 | |||
1081 | if(m->thread == NULL) | ||
1082 | { | ||
1083 | m->locked = 0; /* release lock */ | ||
1084 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1085 | corelock_unlock(&m->cl); | ||
1086 | #endif | ||
1087 | } | ||
1088 | else /* another thread is waiting - remain locked */ | ||
1089 | { | ||
1090 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1091 | corelock_unlock(&m->cl); | ||
1092 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1093 | m->locked = 1; | ||
1094 | #endif | ||
1095 | } | ||
1096 | } | ||
1097 | |||
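
A sketch of the recursion the reworked mutex adds, assuming kernel.h: the owning thread may nest lock/unlock pairs, with m->count tracking the depth, and only the outermost unlock releases the lock or hands it to a queued waiter. The mutex name is hypothetical:

    static struct mutex demo_mtx; /* hypothetical */

    void demo(void)
    {
        mutex_init(&demo_mtx);

        mutex_lock(&demo_mtx);
        mutex_lock(&demo_mtx);   /* same owner: only bumps the count */
        mutex_unlock(&demo_mtx); /* count drops, lock still held */
        mutex_unlock(&demo_mtx); /* released, or passed to a waiter */
    }
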
1098 | /**************************************************************************** | ||
1099 | * Simpl-er mutex functions ;) | ||
1100 | ****************************************************************************/ | ||
1101 | void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags)) | ||
1102 | { | ||
1103 | l->locked = 0; | ||
1104 | l->thread = NULL; | ||
1105 | l->count = 0; | ||
1106 | #if NUM_CORES > 1 | ||
1107 | l->task_switch = flags & SPINLOCK_TASK_SWITCH; | ||
1108 | corelock_init(&l->cl); | ||
1109 | #endif | ||
1110 | } | ||
1111 | |||
1112 | void spinlock_lock(struct spinlock *l) | ||
1113 | { | ||
1114 | struct thread_entry *const thread = cores[CURRENT_CORE].running; | ||
1115 | |||
1116 | if (l->thread == thread) | ||
1117 | { | ||
1118 | l->count++; | ||
1119 | return; | ||
1120 | } | ||
1121 | |||
1122 | #if NUM_CORES > 1 | ||
1123 | if (l->task_switch != 0) | ||
1124 | #endif | ||
1125 | { | ||
1126 | /* Let other threads run until the lock is free */ | ||
1127 | while(test_and_set(&l->locked, 1, &l->cl) != 0) | ||
1128 | { | ||
1129 | /* spin and switch until the lock is open... */ | ||
1130 | switch_thread(NULL); | ||
1131 | } | ||
1132 | } | ||
1133 | #if NUM_CORES > 1 | ||
867 | else | 1134 | else |
868 | wakeup_thread(&m->thread); | 1135 | { |
1136 | /* Use the corelock purely */ | ||
1137 | corelock_lock(&l->cl); | ||
1138 | } | ||
1139 | #endif | ||
1140 | |||
1141 | l->thread = thread; | ||
869 | } | 1142 | } |
870 | 1143 | ||
871 | void spinlock_lock(struct mutex *m) | 1144 | void spinlock_unlock(struct spinlock *l) |
872 | { | 1145 | { |
873 | while (test_and_set(&m->locked, 1)) | 1146 | /* unlocker not being the owner is an unlocking violation */ |
1147 | KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running, | ||
1148 | "spinlock_unlock->wrong thread"); | ||
1149 | |||
1150 | if (l->count > 0) | ||
1151 | { | ||
1152 | /* this thread still owns lock */ | ||
1153 | l->count--; | ||
1154 | return; | ||
1155 | } | ||
1156 | |||
1157 | /* clear owner */ | ||
1158 | l->thread = NULL; | ||
1159 | |||
1160 | #if NUM_CORES > 1 | ||
1161 | if (l->task_switch != 0) | ||
1162 | #endif | ||
874 | { | 1163 | { |
875 | /* wait until the lock is open... */ | 1164 | /* release lock */ |
876 | switch_thread(true, NULL); | 1165 | #if CONFIG_CORELOCK == SW_CORELOCK |
1166 | /* This must be done since our unlock could be missed by the | ||
1167 | test_and_set and leave the object locked permanently */ | ||
1168 | corelock_lock(&l->cl); | ||
1169 | #endif | ||
1170 | l->locked = 0; | ||
877 | } | 1171 | } |
1172 | |||
1173 | #if NUM_CORES > 1 | ||
1174 | corelock_unlock(&l->cl); | ||
1175 | #endif | ||
878 | } | 1176 | } |
879 | 1177 | ||
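
A sketch of the two spinlock flavors, assuming kernel.h: with SPINLOCK_TASK_SWITCH the lock yields between test_and_set attempts, while SPINLOCK_NO_TASK_SWITCH spins on the raw corelock, which is how the cpu_boost code later in this diff initializes boostctrl_spin. The lock name here is hypothetical:

    static struct spinlock demo_spin NOCACHEBSS_ATTR; /* hypothetical */

    void demo_spin_init(void)
    {
        spinlock_init(&demo_spin IF_COP(, SPINLOCK_NO_TASK_SWITCH));
    }

    void demo_touch_shared(void)
    {
        spinlock_lock(&demo_spin);
        /* ... keep the critical section short ... */
        spinlock_unlock(&demo_spin);
    }
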
880 | void spinlock_unlock(struct mutex *m) | 1178 | /**************************************************************************** |
1179 | * Simple semaphore functions ;) | ||
1180 | ****************************************************************************/ | ||
1181 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
1182 | void semaphore_init(struct semaphore *s, int max, int start) | ||
881 | { | 1183 | { |
882 | m->locked = 0; | 1184 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, |
1185 | "semaphore_init->inv arg"); | ||
1186 | s->queue = NULL; | ||
1187 | s->max = max; | ||
1188 | s->count = start; | ||
1189 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1190 | corelock_init(&s->cl); | ||
1191 | #endif | ||
883 | } | 1192 | } |
884 | 1193 | ||
885 | #endif /* ndef SIMULATOR */ | 1194 | void semaphore_wait(struct semaphore *s) |
1195 | { | ||
1196 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK | ||
1197 | corelock_lock(&s->cl); | ||
1198 | if(--s->count >= 0) | ||
1199 | { | ||
1200 | corelock_unlock(&s->cl); | ||
1201 | return; | ||
1202 | } | ||
1203 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1204 | int count; | ||
1205 | while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi); | ||
1206 | if(--count >= 0) | ||
1207 | { | ||
1208 | s->count = count; | ||
1209 | return; | ||
1210 | } | ||
1211 | #endif | ||
1212 | |||
1213 | /* too many waits - block until dequeued */ | ||
1214 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1215 | const unsigned int core = CURRENT_CORE; | ||
1216 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; | ||
1217 | cores[core].blk_ops.cl_p = &s->cl; | ||
1218 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1219 | const unsigned int core = CURRENT_CORE; | ||
1220 | cores[core].blk_ops.flags = TBOP_SET_VARi; | ||
1221 | cores[core].blk_ops.var_ip = &s->count; | ||
1222 | cores[core].blk_ops.var_iv = count; | ||
1223 | #endif | ||
1224 | block_thread_no_listlock(&s->queue); | ||
1225 | } | ||
1226 | |||
1227 | void semaphore_release(struct semaphore *s) | ||
1228 | { | ||
1229 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK | ||
1230 | corelock_lock(&s->cl); | ||
1231 | if (s->count < s->max) | ||
1232 | { | ||
1233 | if (++s->count <= 0) | ||
1234 | { | ||
1235 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1236 | int count; | ||
1237 | while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi); | ||
1238 | if(count < s->max) | ||
1239 | { | ||
1240 | if(++count <= 0) | ||
1241 | { | ||
1242 | #endif /* CONFIG_CORELOCK */ | ||
1243 | |||
1244 | /* there should be threads in this queue */ | ||
1245 | KERNEL_ASSERT(s->queue.queue != NULL, "semaphore->wakeup"); | ||
1246 | /* a thread was queued - wake it up */ | ||
1247 | wakeup_thread_no_listlock(&s->queue); | ||
1248 | } | ||
1249 | } | ||
1250 | |||
1251 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1252 | corelock_unlock(&s->cl); | ||
1253 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1254 | s->count = count; | ||
1255 | #endif | ||
1256 | } | ||
1257 | #endif /* HAVE_SEMAPHORE_OBJECTS */ | ||
1258 | |||
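
A sketch of a counting semaphore as added here, assuming kernel.h and HAVE_SEMAPHORE_OBJECTS: the count starts at start, semaphore_wait() blocks once the count would drop below zero, and semaphore_release() wakes one queued waiter. The buffer-pool scenario is hypothetical:

    static struct semaphore buf_sem; /* hypothetical */

    void pool_init(void)
    {
        semaphore_init(&buf_sem, 4 /* max */, 0 /* start empty */);
    }

    void producer(void)
    {
        /* ... fill one buffer ... */
        semaphore_release(&buf_sem); /* wakes a waiter if any queued */
    }

    void consumer(void)
    {
        semaphore_wait(&buf_sem);    /* blocks while nothing is ready */
        /* ... drain one buffer ... */
    }
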
1259 | /**************************************************************************** | ||
1260 | * Simple event functions ;) | ||
1261 | ****************************************************************************/ | ||
1262 | #ifdef HAVE_EVENT_OBJECTS | ||
1263 | void event_init(struct event *e, unsigned int flags) | ||
1264 | { | ||
1265 | e->queues[STATE_NONSIGNALED] = NULL; | ||
1266 | e->queues[STATE_SIGNALED] = NULL; | ||
1267 | e->state = flags & STATE_SIGNALED; | ||
1268 | e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0; | ||
1269 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1270 | corelock_init(&e->cl); | ||
1271 | #endif | ||
1272 | } | ||
1273 | |||
1274 | void event_wait(struct event *e, unsigned int for_state) | ||
1275 | { | ||
1276 | unsigned int last_state; | ||
1277 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK | ||
1278 | corelock_lock(&e->cl); | ||
1279 | last_state = e->state; | ||
1280 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1281 | while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8); | ||
1282 | #endif | ||
1283 | |||
1284 | if(e->automatic != 0) | ||
1285 | { | ||
1286 | /* wait for false always satisfied by definition | ||
1287 | or if it just changed to false */ | ||
1288 | if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED) | ||
1289 | { | ||
1290 | /* automatic - unsignal */ | ||
1291 | e->state = STATE_NONSIGNALED; | ||
1292 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1293 | corelock_unlock(&e->cl); | ||
1294 | #endif | ||
1295 | return; | ||
1296 | } | ||
1297 | /* block until state matches */ | ||
1298 | } | ||
1299 | else if(for_state == last_state) | ||
1300 | { | ||
1301 | /* the state being waited for is the current state */ | ||
1302 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1303 | corelock_unlock(&e->cl); | ||
1304 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1305 | e->state = last_state; | ||
1306 | #endif | ||
1307 | return; | ||
1308 | } | ||
1309 | |||
1310 | { | ||
1311 | /* current state does not match wait-for state */ | ||
1312 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1313 | const unsigned int core = CURRENT_CORE; | ||
1314 | cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; | ||
1315 | cores[core].blk_ops.cl_p = &e->cl; | ||
1316 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1317 | const unsigned int core = CURRENT_CORE; | ||
1318 | cores[core].blk_ops.flags = TBOP_SET_VARu8; | ||
1319 | cores[core].blk_ops.var_u8p = &e->state; | ||
1320 | cores[core].blk_ops.var_u8v = last_state; | ||
1321 | #endif | ||
1322 | block_thread_no_listlock(&e->queues[for_state]); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1326 | void event_set_state(struct event *e, unsigned int state) | ||
1327 | { | ||
1328 | unsigned int last_state; | ||
1329 | #if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK | ||
1330 | corelock_lock(&e->cl); | ||
1331 | last_state = e->state; | ||
1332 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1333 | while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8); | ||
1334 | #endif | ||
1335 | |||
1336 | if(last_state == state) | ||
1337 | { | ||
1338 | /* no change */ | ||
1339 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1340 | corelock_unlock(&e->cl); | ||
1341 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1342 | e->state = last_state; | ||
1343 | #endif | ||
1344 | return; | ||
1345 | } | ||
1346 | |||
1347 | if(state == STATE_SIGNALED) | ||
1348 | { | ||
1349 | if(e->automatic != 0) | ||
1350 | { | ||
1351 | struct thread_entry *thread; | ||
1352 | /* no thread should have ever blocked for unsignaled */ | ||
1353 | KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL, | ||
1354 | "set_event_state->queue[NS]:S"); | ||
1355 | /* pass to next thread and keep unsignaled - "pulse" */ | ||
1356 | thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]); | ||
1357 | e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED; | ||
1358 | } | ||
1359 | else | ||
1360 | { | ||
1361 | /* release all threads waiting for signaled */ | ||
1362 | thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]); | ||
1363 | e->state = STATE_SIGNALED; | ||
1364 | } | ||
1365 | } | ||
1366 | else | ||
1367 | { | ||
1368 | /* release all threads waiting for unsignaled */ | ||
1369 | |||
1370 | /* no thread should have ever blocked if automatic */ | ||
1371 | KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL || | ||
1372 | e->automatic == 0, "set_event_state->queue[NS]:NS"); | ||
1373 | |||
1374 | thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]); | ||
1375 | e->state = STATE_NONSIGNALED; | ||
1376 | } | ||
1377 | |||
1378 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1379 | corelock_unlock(&e->cl); | ||
1380 | #endif | ||
1381 | } | ||
1382 | #endif /* HAVE_EVENT_OBJECTS */ | ||
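
A sketch of the automatic-event "pulse" behavior, assuming kernel.h and HAVE_EVENT_OBJECTS: each event_set_state(..., STATE_SIGNALED) on an automatic event releases at most one waiter and the event drops back to nonsignaled. The worker scenario is hypothetical:

    static struct event demo_ev; /* hypothetical */

    void demo_ev_init(void)
    {
        event_init(&demo_ev, EVENT_AUTOMATIC); /* starts nonsignaled */
    }

    void worker(void)
    {
        while (1)
        {
            event_wait(&demo_ev, STATE_SIGNALED); /* consumes one pulse */
            /* ... handle one unit of work ... */
        }
    }

    void kick_worker(void)
    {
        event_set_state(&demo_ev, STATE_SIGNALED);
    }
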
diff --git a/firmware/mpeg.c b/firmware/mpeg.c index 65fb024db3..a4632aae38 100644 --- a/firmware/mpeg.c +++ b/firmware/mpeg.c | |||
@@ -1230,7 +1230,7 @@ static void mpeg_thread(void) | |||
1230 | { | 1230 | { |
1231 | static int pause_tick = 0; | 1231 | static int pause_tick = 0; |
1232 | static unsigned int pause_track = 0; | 1232 | static unsigned int pause_track = 0; |
1233 | struct event ev; | 1233 | struct queue_event ev; |
1234 | int len; | 1234 | int len; |
1235 | int free_space_left; | 1235 | int free_space_left; |
1236 | int unplayed_space_left; | 1236 | int unplayed_space_left; |
@@ -2910,8 +2910,9 @@ void audio_init(void) | |||
2910 | queue_init(&mpeg_queue, true); | 2910 | queue_init(&mpeg_queue, true); |
2911 | #endif /* !SIMULATOR */ | 2911 | #endif /* !SIMULATOR */ |
2912 | create_thread(mpeg_thread, mpeg_stack, | 2912 | create_thread(mpeg_thread, mpeg_stack, |
2913 | sizeof(mpeg_stack), mpeg_thread_name IF_PRIO(, PRIORITY_SYSTEM) | 2913 | sizeof(mpeg_stack), 0, mpeg_thread_name |
2914 | IF_COP(, CPU, false)); | 2914 | IF_PRIO(, PRIORITY_SYSTEM) |
2915 | IF_COP(, CPU)); | ||
2915 | 2916 | ||
2916 | memset(trackdata, sizeof(trackdata), 0); | 2917 | memset(trackdata, sizeof(trackdata), 0); |
2917 | 2918 | ||
diff --git a/firmware/pcm_record.c b/firmware/pcm_record.c index 361689de3a..c2d2719d05 100644 --- a/firmware/pcm_record.c +++ b/firmware/pcm_record.c | |||
@@ -213,8 +213,8 @@ enum | |||
213 | 213 | ||
214 | /***************************************************************************/ | 214 | /***************************************************************************/ |
215 | 215 | ||
216 | static struct event_queue pcmrec_queue; | 216 | static struct event_queue pcmrec_queue NOCACHEBSS_ATTR; |
217 | static struct queue_sender_list pcmrec_queue_send; | 217 | static struct queue_sender_list pcmrec_queue_send NOCACHEBSS_ATTR; |
218 | static long pcmrec_stack[3*DEFAULT_STACK_SIZE/sizeof(long)]; | 218 | static long pcmrec_stack[3*DEFAULT_STACK_SIZE/sizeof(long)]; |
219 | static const char pcmrec_thread_name[] = "pcmrec"; | 219 | static const char pcmrec_thread_name[] = "pcmrec"; |
220 | static struct thread_entry *pcmrec_thread_p; | 220 | static struct thread_entry *pcmrec_thread_p; |
@@ -365,8 +365,8 @@ void pcm_rec_init(void) | |||
365 | queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send); | 365 | queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send); |
366 | pcmrec_thread_p = | 366 | pcmrec_thread_p = |
367 | create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack), | 367 | create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack), |
368 | pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING) | 368 | 0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING) |
369 | IF_COP(, CPU, false)); | 369 | IF_COP(, CPU)); |
370 | } /* pcm_rec_init */ | 370 | } /* pcm_rec_init */ |
371 | 371 | ||
372 | /** audio_* group **/ | 372 | /** audio_* group **/ |
@@ -1437,7 +1437,7 @@ static void pcmrec_resume(void) | |||
1437 | static void pcmrec_thread(void) __attribute__((noreturn)); | 1437 | static void pcmrec_thread(void) __attribute__((noreturn)); |
1438 | static void pcmrec_thread(void) | 1438 | static void pcmrec_thread(void) |
1439 | { | 1439 | { |
1440 | struct event ev; | 1440 | struct queue_event ev; |
1441 | 1441 | ||
1442 | logf("thread pcmrec start"); | 1442 | logf("thread pcmrec start"); |
1443 | 1443 | ||
diff --git a/firmware/powermgmt.c b/firmware/powermgmt.c index bb88fce318..fcc3030861 100644 --- a/firmware/powermgmt.c +++ b/firmware/powermgmt.c | |||
@@ -1103,9 +1103,9 @@ void powermgmt_init(void) | |||
1103 | { | 1103 | { |
1104 | /* init history to 0 */ | 1104 | /* init history to 0 */ |
1105 | memset(power_history, 0x00, sizeof(power_history)); | 1105 | memset(power_history, 0x00, sizeof(power_history)); |
1106 | create_thread(power_thread, power_stack, sizeof(power_stack), | 1106 | create_thread(power_thread, power_stack, sizeof(power_stack), 0, |
1107 | power_thread_name IF_PRIO(, PRIORITY_SYSTEM) | 1107 | power_thread_name IF_PRIO(, PRIORITY_SYSTEM) |
1108 | IF_COP(, CPU, false)); | 1108 | IF_COP(, CPU)); |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | #endif /* SIMULATOR */ | 1111 | #endif /* SIMULATOR */ |
diff --git a/firmware/rolo.c b/firmware/rolo.c index fa1748341b..2a4b753948 100644 --- a/firmware/rolo.c +++ b/firmware/rolo.c | |||
@@ -63,8 +63,8 @@ void rolo_restart_cop(void) | |||
63 | { | 63 | { |
64 | /* There should be free thread slots aplenty */ | 64 | /* There should be free thread slots aplenty */ |
65 | create_thread(rolo_restart_cop, cop_idlestackbegin, IDLE_STACK_SIZE, | 65 | create_thread(rolo_restart_cop, cop_idlestackbegin, IDLE_STACK_SIZE, |
66 | "rolo COP" IF_PRIO(, PRIORITY_REALTIME) | 66 | 0, "rolo COP" IF_PRIO(, PRIORITY_REALTIME) |
67 | IF_COP(, COP, false)); | 67 | IF_COP(, COP)); |
68 | return; | 68 | return; |
69 | } | 69 | } |
70 | 70 | ||
diff --git a/firmware/scroll_engine.c b/firmware/scroll_engine.c index 63ca8883de..7c66601d6a 100644 --- a/firmware/scroll_engine.c +++ b/firmware/scroll_engine.c | |||
@@ -46,7 +46,7 @@ struct scrollinfo lcd_scroll[LCD_SCROLLABLE_LINES]; | |||
46 | 46 | ||
47 | #ifdef HAVE_REMOTE_LCD | 47 | #ifdef HAVE_REMOTE_LCD |
48 | struct scrollinfo lcd_remote_scroll[LCD_REMOTE_SCROLLABLE_LINES]; | 48 | struct scrollinfo lcd_remote_scroll[LCD_REMOTE_SCROLLABLE_LINES]; |
49 | struct event_queue scroll_queue; | 49 | struct event_queue scroll_queue NOCACHEBSS_ATTR; |
50 | #endif | 50 | #endif |
51 | 51 | ||
52 | struct scroll_screen_info lcd_scroll_info = | 52 | struct scroll_screen_info lcd_scroll_info = |
@@ -150,7 +150,7 @@ static void sync_display_ticks(void) | |||
150 | 150 | ||
151 | static bool scroll_process_message(int delay) | 151 | static bool scroll_process_message(int delay) |
152 | { | 152 | { |
153 | struct event ev; | 153 | struct queue_event ev; |
154 | 154 | ||
155 | do | 155 | do |
156 | { | 156 | { |
@@ -268,7 +268,7 @@ void scroll_init(void) | |||
268 | queue_init(&scroll_queue, true); | 268 | queue_init(&scroll_queue, true); |
269 | #endif | 269 | #endif |
270 | create_thread(scroll_thread, scroll_stack, | 270 | create_thread(scroll_thread, scroll_stack, |
271 | sizeof(scroll_stack), scroll_name | 271 | sizeof(scroll_stack), 0, scroll_name |
272 | IF_PRIO(, PRIORITY_USER_INTERFACE) | 272 | IF_PRIO(, PRIORITY_USER_INTERFACE) |
273 | IF_COP(, CPU, false)); | 273 | IF_COP(, CPU)); |
274 | } | 274 | } |
diff --git a/firmware/system.c b/firmware/system.c index 6ff0dbb5d1..0b5ae1719e 100644 --- a/firmware/system.c +++ b/firmware/system.c | |||
@@ -35,6 +35,13 @@ long cpu_frequency NOCACHEBSS_ATTR = CPU_FREQ; | |||
35 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ | 35 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ |
36 | static int boost_counter NOCACHEBSS_ATTR = 0; | 36 | static int boost_counter NOCACHEBSS_ATTR = 0; |
37 | static bool cpu_idle NOCACHEBSS_ATTR = false; | 37 | static bool cpu_idle NOCACHEBSS_ATTR = false; |
38 | #if NUM_CORES > 1 | ||
39 | struct spinlock boostctrl_spin NOCACHEBSS_ATTR; | ||
40 | void cpu_boost_init(void) | ||
41 | { | ||
42 | spinlock_init(&boostctrl_spin, SPINLOCK_NO_TASK_SWITCH); | ||
43 | } | ||
44 | #endif | ||
38 | 45 | ||
39 | int get_cpu_boost_counter(void) | 46 | int get_cpu_boost_counter(void) |
40 | { | 47 | { |
@@ -52,25 +59,51 @@ int cpu_boost_log_getcount(void) | |||
52 | } | 59 | } |
53 | char * cpu_boost_log_getlog_first(void) | 60 | char * cpu_boost_log_getlog_first(void) |
54 | { | 61 | { |
62 | char *first; | ||
63 | #if NUM_CORES > 1 | ||
64 | spinlock_lock(&boostctrl_spin); | ||
65 | #endif | ||
66 | |||
67 | first = NULL; | ||
68 | |||
55 | if (cpu_boost_calls_count) | 69 | if (cpu_boost_calls_count) |
56 | { | 70 | { |
57 | cpu_boost_track_message = 1; | 71 | cpu_boost_track_message = 1; |
58 | return cpu_boost_calls[cpu_boost_first]; | 72 | first = cpu_boost_calls[cpu_boost_first]; |
59 | } | 73 | } |
60 | else return NULL; | 74 | |
75 | #if NUM_CORES > 1 | ||
76 | spinlock_unlock(&boostctrl_spin); | ||
77 | #endif | ||
61 | } | 78 | } |
62 | char * cpu_boost_log_getlog_next(void) | 79 | char * cpu_boost_log_getlog_next(void) |
63 | { | 80 | { |
64 | int message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG; | 81 | int message; |
82 | char *next; | ||
83 | |||
84 | #if NUM_CORES > 1 | ||
85 | spinlock_lock(&boostctrl_spin); | ||
86 | #endif | ||
87 | |||
88 | message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG; | ||
89 | next = NULL; | ||
90 | |||
65 | if (cpu_boost_track_message < cpu_boost_calls_count) | 91 | if (cpu_boost_track_message < cpu_boost_calls_count) |
66 | { | 92 | { |
67 | cpu_boost_track_message++; | 93 | cpu_boost_track_message++; |
68 | return cpu_boost_calls[message]; | 94 | next = cpu_boost_calls[message]; |
69 | } | 95 | } |
70 | else return NULL; | 96 | |
97 | #if NUM_CORES > 1 | ||
98 | spinlock_unlock(&boostctrl_spin); | ||
99 | #endif | ||
71 | } | 100 | } |
72 | void cpu_boost_(bool on_off, char* location, int line) | 101 | void cpu_boost_(bool on_off, char* location, int line) |
73 | { | 102 | { |
103 | #if NUM_CORES > 1 | ||
104 | spinlock_lock(&boostctrl_spin); | ||
105 | #endif | ||
106 | |||
74 | if (cpu_boost_calls_count == MAX_BOOST_LOG) | 107 | if (cpu_boost_calls_count == MAX_BOOST_LOG) |
75 | { | 108 | { |
76 | cpu_boost_first = (cpu_boost_first+1)%MAX_BOOST_LOG; | 109 | cpu_boost_first = (cpu_boost_first+1)%MAX_BOOST_LOG; |
@@ -88,32 +121,46 @@ void cpu_boost_(bool on_off, char* location, int line) | |||
88 | #else | 121 | #else |
89 | void cpu_boost(bool on_off) | 122 | void cpu_boost(bool on_off) |
90 | { | 123 | { |
124 | #if NUM_CORES > 1 | ||
125 | spinlock_lock(&boostctrl_spin); | ||
91 | #endif | 126 | #endif |
127 | |||
128 | #endif /* CPU_BOOST_LOGGING */ | ||
92 | if(on_off) | 129 | if(on_off) |
93 | { | 130 | { |
94 | /* Boost the frequency if not already boosted */ | 131 | /* Boost the frequency if not already boosted */ |
95 | if(boost_counter++ == 0) | 132 | if(++boost_counter == 1) |
96 | set_cpu_frequency(CPUFREQ_MAX); | 133 | set_cpu_frequency(CPUFREQ_MAX); |
97 | } | 134 | } |
98 | else | 135 | else |
99 | { | 136 | { |
100 | /* Lower the frequency if the counter reaches 0 */ | 137 | /* Lower the frequency if the counter reaches 0 */ |
101 | if(--boost_counter == 0) | 138 | if(--boost_counter <= 0) |
102 | { | 139 | { |
103 | if(cpu_idle) | 140 | if(cpu_idle) |
104 | set_cpu_frequency(CPUFREQ_DEFAULT); | 141 | set_cpu_frequency(CPUFREQ_DEFAULT); |
105 | else | 142 | else |
106 | set_cpu_frequency(CPUFREQ_NORMAL); | 143 | set_cpu_frequency(CPUFREQ_NORMAL); |
107 | } | ||
108 | 144 | ||
109 | /* Safety measure */ | 145 | /* Safety measure */ |
110 | if(boost_counter < 0) | 146 | if (boost_counter < 0) |
111 | boost_counter = 0; | 147 | { |
148 | boost_counter = 0; | ||
149 | } | ||
150 | } | ||
112 | } | 151 | } |
152 | |||
153 | #if NUM_CORES > 1 | ||
154 | spinlock_unlock(&boostctrl_spin); | ||
155 | #endif | ||
113 | } | 156 | } |
114 | 157 | ||
115 | void cpu_idle_mode(bool on_off) | 158 | void cpu_idle_mode(bool on_off) |
116 | { | 159 | { |
160 | #if NUM_CORES > 1 | ||
161 | spinlock_lock(&boostctrl_spin); | ||
162 | #endif | ||
163 | |||
117 | cpu_idle = on_off; | 164 | cpu_idle = on_off; |
118 | 165 | ||
119 | /* We need to adjust the frequency immediately if the CPU | 166 | /* We need to adjust the frequency immediately if the CPU |
@@ -125,6 +172,10 @@ void cpu_idle_mode(bool on_off) | |||
125 | else | 172 | else |
126 | set_cpu_frequency(CPUFREQ_NORMAL); | 173 | set_cpu_frequency(CPUFREQ_NORMAL); |
127 | } | 174 | } |
175 | |||
176 | #if NUM_CORES > 1 | ||
177 | spinlock_unlock(&boostctrl_spin); | ||
178 | #endif | ||
128 | } | 179 | } |
129 | #endif /* HAVE_ADJUSTABLE_CPU_FREQ */ | 180 | #endif /* HAVE_ADJUSTABLE_CPU_FREQ */ |
130 | 181 | ||
@@ -199,6 +250,7 @@ void UIE(unsigned int pc, unsigned int num) | |||
199 | /* TODO: perhaps add button handling in here when we get a polling | 250 | /* TODO: perhaps add button handling in here when we get a polling |
200 | driver some day. | 251 | driver some day. |
201 | */ | 252 | */ |
253 | core_idle(); | ||
202 | } | 254 | } |
203 | } | 255 | } |
204 | 256 | ||
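
With frequency scaling now shared across cores, cpu_boost() takes boostctrl_spin around the whole counter update, and the edge tests move to pre-increment/pre-decrement form so the clock changes exactly on the 0 -> 1 and 1 -> 0 transitions, with a clamp against unbalanced unboosts. (Note that cpu_boost_log_getlog_first()/_next() as shown compute first/next but reach their closing brace without returning them.) The resulting logic, as a condensed sketch rather than the verbatim source:

    void cpu_boost(bool on_off)
    {
    #if NUM_CORES > 1
        spinlock_lock(&boostctrl_spin);   /* counter is shared between cores */
    #endif
        if (on_off)
        {
            if (++boost_counter == 1)     /* first booster raises the clock */
                set_cpu_frequency(CPUFREQ_MAX);
        }
        else
        {
            if (--boost_counter <= 0)     /* last unboost lowers it again */
            {
                set_cpu_frequency(cpu_idle ? CPUFREQ_DEFAULT : CPUFREQ_NORMAL);
                if (boost_counter < 0)
                    boost_counter = 0;    /* clamp unbalanced unboosts */
            }
        }
    #if NUM_CORES > 1
        spinlock_unlock(&boostctrl_spin);
    #endif
    }
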
diff --git a/firmware/target/arm/i2c-pp.c b/firmware/target/arm/i2c-pp.c index 1cc25a1a10..e5813f9f9a 100644 --- a/firmware/target/arm/i2c-pp.c +++ b/firmware/target/arm/i2c-pp.c | |||
@@ -132,18 +132,18 @@ static int pp_i2c_send_byte(unsigned int addr, int data0) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /* Public functions */ | 134 | /* Public functions */ |
135 | static struct mutex i2c_mutex; | 135 | struct spinlock i2c_spin NOCACHEBSS_ATTR; |
136 | 136 | ||
137 | int i2c_readbytes(unsigned int dev_addr, int addr, int len, unsigned char *data) { | 137 | int i2c_readbytes(unsigned int dev_addr, int addr, int len, unsigned char *data) { |
138 | unsigned int temp; | 138 | unsigned int temp; |
139 | int i; | 139 | int i; |
140 | spinlock_lock(&i2c_mutex); | 140 | spinlock_lock(&i2c_spin); |
141 | pp_i2c_send_byte(dev_addr, addr); | 141 | pp_i2c_send_byte(dev_addr, addr); |
142 | for (i = 0; i < len; i++) { | 142 | for (i = 0; i < len; i++) { |
143 | pp_i2c_read_byte(dev_addr, &temp); | 143 | pp_i2c_read_byte(dev_addr, &temp); |
144 | data[i] = temp; | 144 | data[i] = temp; |
145 | } | 145 | } |
146 | spinlock_unlock(&i2c_mutex); | 146 | spinlock_unlock(&i2c_spin); |
147 | return i; | 147 | return i; |
148 | } | 148 | } |
149 | 149 | ||
@@ -151,10 +151,10 @@ int i2c_readbyte(unsigned int dev_addr, int addr) | |||
151 | { | 151 | { |
152 | int data; | 152 | int data; |
153 | 153 | ||
154 | spinlock_lock(&i2c_mutex); | 154 | spinlock_lock(&i2c_spin); |
155 | pp_i2c_send_byte(dev_addr, addr); | 155 | pp_i2c_send_byte(dev_addr, addr); |
156 | pp_i2c_read_byte(dev_addr, &data); | 156 | pp_i2c_read_byte(dev_addr, &data); |
157 | spinlock_unlock(&i2c_mutex); | 157 | spinlock_unlock(&i2c_spin); |
158 | 158 | ||
159 | return data; | 159 | return data; |
160 | } | 160 | } |
@@ -167,9 +167,9 @@ int pp_i2c_send(unsigned int addr, int data0, int data1) | |||
167 | data[0] = data0; | 167 | data[0] = data0; |
168 | data[1] = data1; | 168 | data[1] = data1; |
169 | 169 | ||
170 | spinlock_lock(&i2c_mutex); | 170 | spinlock_lock(&i2c_spin); |
171 | retval = pp_i2c_send_bytes(addr, 2, data); | 171 | retval = pp_i2c_send_bytes(addr, 2, data); |
172 | spinlock_unlock(&i2c_mutex); | 172 | spinlock_unlock(&i2c_spin); |
173 | 173 | ||
174 | return retval; | 174 | return retval; |
175 | } | 175 | } |
@@ -221,7 +221,7 @@ void i2c_init(void) | |||
221 | #endif | 221 | #endif |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | spinlock_init(&i2c_mutex); | 224 | spinlock_init(&i2c_spin IF_COP(, SPINLOCK_TASK_SWITCH)); |
225 | 225 | ||
226 | i2c_readbyte(0x8, 0); | 226 | i2c_readbyte(0x8, 0); |
227 | } | 227 | } |
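
The bus lock is now a spinlock rather than a private mutex, and it is exported: i2c_spin lives here, but other PortalPlayer drivers can serialize against the same bus (the c200/e200 ADC driver later in this diff drops its own lock in favor of it). A hypothetical composite helper built from the public calls above, each of which already takes i2c_spin internally:

    extern struct spinlock i2c_spin;   /* the shared PP i2c bus lock above */

    /* Hypothetical read-modify-write helper, not part of this commit;
     * i2c_readbyte() and pp_i2c_send() each serialize on i2c_spin. */
    int i2c_set_bits(unsigned int dev_addr, int reg, int bits)
    {
        int val = i2c_readbyte(dev_addr, reg);
        return pp_i2c_send(dev_addr, reg, val | bits);
    }
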
diff --git a/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c b/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c index 8866c3dcde..3a854afcdc 100644 --- a/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c +++ b/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include "hwcompat.h" | 22 | #include "hwcompat.h" |
23 | #include "kernel.h" | 23 | #include "kernel.h" |
24 | 24 | ||
25 | static struct mutex adc_mutex NOCACHEBSS_ATTR; | 25 | static struct spinlock adc_spin NOCACHEBSS_ATTR; |
26 | 26 | ||
27 | /* used in the 2nd gen ADC interrupt */ | 27 | /* used in the 2nd gen ADC interrupt */ |
28 | static unsigned int_data; | 28 | static unsigned int_data; |
@@ -33,7 +33,7 @@ unsigned short adc_scan(int channel) | |||
33 | unsigned short data = 0; | 33 | unsigned short data = 0; |
34 | 34 | ||
35 | (void)channel; /* there is only one */ | 35 | (void)channel; /* there is only one */ |
36 | spinlock_lock(&adc_mutex); | 36 | spinlock_lock(&adc_spin); |
37 | 37 | ||
38 | if ((IPOD_HW_REVISION >> 16) == 1) | 38 | if ((IPOD_HW_REVISION >> 16) == 1) |
39 | { | 39 | { |
@@ -69,7 +69,7 @@ unsigned short adc_scan(int channel) | |||
69 | 69 | ||
70 | data = int_data & 0xff; | 70 | data = int_data & 0xff; |
71 | } | 71 | } |
72 | spinlock_unlock(&adc_mutex); | 72 | spinlock_unlock(&adc_spin); |
73 | return data; | 73 | return data; |
74 | } | 74 | } |
75 | 75 | ||
@@ -100,7 +100,7 @@ void ipod_2g_adc_int(void) | |||
100 | 100 | ||
101 | void adc_init(void) | 101 | void adc_init(void) |
102 | { | 102 | { |
103 | spinlock_init(&adc_mutex); | 103 | spinlock_init(&adc_spin IF_COP(, SPINLOCK_TASK_SWITCH)); |
104 | 104 | ||
105 | GPIOB_ENABLE |= 0x1e; /* enable B1..B4 */ | 105 | GPIOB_ENABLE |= 0x1e; /* enable B1..B4 */ |
106 | 106 | ||
diff --git a/firmware/target/arm/sandisk/adc-c200_e200.c b/firmware/target/arm/sandisk/adc-c200_e200.c index 31321ece37..9dc8f3aabb 100644 --- a/firmware/target/arm/sandisk/adc-c200_e200.c +++ b/firmware/target/arm/sandisk/adc-c200_e200.c | |||
@@ -21,8 +21,6 @@ | |||
21 | #include "i2c-pp.h" | 21 | #include "i2c-pp.h" |
22 | #include "as3514.h" | 22 | #include "as3514.h" |
23 | 23 | ||
24 | static struct mutex adc_mutex NOCACHEBSS_ATTR; | ||
25 | |||
26 | /* Read 10-bit channel data */ | 24 | /* Read 10-bit channel data */ |
27 | unsigned short adc_read(int channel) | 25 | unsigned short adc_read(int channel) |
28 | { | 26 | { |
@@ -30,7 +28,7 @@ unsigned short adc_read(int channel) | |||
30 | 28 | ||
31 | if ((unsigned)channel < NUM_ADC_CHANNELS) | 29 | if ((unsigned)channel < NUM_ADC_CHANNELS) |
32 | { | 30 | { |
33 | spinlock_lock(&adc_mutex); | 31 | spinlock_lock(&i2c_spin); |
34 | 32 | ||
35 | /* Select channel */ | 33 | /* Select channel */ |
36 | if (pp_i2c_send( AS3514_I2C_ADDR, ADC_0, (channel << 4)) >= 0) | 34 | if (pp_i2c_send( AS3514_I2C_ADDR, ADC_0, (channel << 4)) >= 0) |
@@ -44,7 +42,7 @@ unsigned short adc_read(int channel) | |||
44 | } | 42 | } |
45 | } | 43 | } |
46 | 44 | ||
47 | spinlock_unlock(&adc_mutex); | 45 | spinlock_unlock(&i2c_spin); |
48 | } | 46 | } |
49 | 47 | ||
50 | return data; | 48 | return data; |
@@ -52,5 +50,4 @@ unsigned short adc_read(int channel) | |||
52 | 50 | ||
53 | void adc_init(void) | 51 | void adc_init(void) |
54 | { | 52 | { |
55 | spinlock_init(&adc_mutex); | ||
56 | } | 53 | } |
diff --git a/firmware/target/arm/sandisk/ata-c200_e200.c b/firmware/target/arm/sandisk/ata-c200_e200.c index 14be27e19d..8e17152e6f 100644 --- a/firmware/target/arm/sandisk/ata-c200_e200.c +++ b/firmware/target/arm/sandisk/ata-c200_e200.c | |||
@@ -162,7 +162,7 @@ static struct sd_card_status sd_status[NUM_VOLUMES] = | |||
162 | /* Shoot for around 75% usage */ | 162 | /* Shoot for around 75% usage */ |
163 | static long sd_stack [(DEFAULT_STACK_SIZE*2 + 0x1c0)/sizeof(long)]; | 163 | static long sd_stack [(DEFAULT_STACK_SIZE*2 + 0x1c0)/sizeof(long)]; |
164 | static const char sd_thread_name[] = "ata/sd"; | 164 | static const char sd_thread_name[] = "ata/sd"; |
165 | static struct mutex sd_mtx; | 165 | static struct spinlock sd_spin NOCACHEBSS_ATTR; |
166 | static struct event_queue sd_queue; | 166 | static struct event_queue sd_queue; |
167 | 167 | ||
168 | /* Posted when card plugged status has changed */ | 168 | /* Posted when card plugged status has changed */ |
@@ -801,7 +801,7 @@ int ata_read_sectors(IF_MV2(int drive,) unsigned long start, int incount, | |||
801 | 801 | ||
802 | /* TODO: Add DMA support. */ | 802 | /* TODO: Add DMA support. */ |
803 | 803 | ||
804 | spinlock_lock(&sd_mtx); | 804 | spinlock_lock(&sd_spin); |
805 | 805 | ||
806 | ata_led(true); | 806 | ata_led(true); |
807 | 807 | ||
@@ -888,7 +888,7 @@ ata_read_retry: | |||
888 | while (1) | 888 | while (1) |
889 | { | 889 | { |
890 | ata_led(false); | 890 | ata_led(false); |
891 | spinlock_unlock(&sd_mtx); | 891 | spinlock_unlock(&sd_spin); |
892 | 892 | ||
893 | return ret; | 893 | return ret; |
894 | 894 | ||
@@ -916,7 +916,7 @@ int ata_write_sectors(IF_MV2(int drive,) unsigned long start, int count, | |||
916 | const unsigned char *buf, *buf_end; | 916 | const unsigned char *buf, *buf_end; |
917 | int bank; | 917 | int bank; |
918 | 918 | ||
919 | spinlock_lock(&sd_mtx); | 919 | spinlock_lock(&sd_spin); |
920 | 920 | ||
921 | ata_led(true); | 921 | ata_led(true); |
922 | 922 | ||
@@ -1016,7 +1016,7 @@ ata_write_retry: | |||
1016 | while (1) | 1016 | while (1) |
1017 | { | 1017 | { |
1018 | ata_led(false); | 1018 | ata_led(false); |
1019 | spinlock_unlock(&sd_mtx); | 1019 | spinlock_unlock(&sd_spin); |
1020 | 1020 | ||
1021 | return ret; | 1021 | return ret; |
1022 | 1022 | ||
@@ -1034,7 +1034,7 @@ ata_write_error: | |||
1034 | static void sd_thread(void) __attribute__((noreturn)); | 1034 | static void sd_thread(void) __attribute__((noreturn)); |
1035 | static void sd_thread(void) | 1035 | static void sd_thread(void) |
1036 | { | 1036 | { |
1037 | struct event ev; | 1037 | struct queue_event ev; |
1038 | bool idle_notified = false; | 1038 | bool idle_notified = false; |
1039 | 1039 | ||
1040 | while (1) | 1040 | while (1) |
@@ -1050,10 +1050,9 @@ static void sd_thread(void) | |||
1050 | 1050 | ||
1051 | /* Lock to keep us from messing with this variable while an init | 1051 | /* Lock to keep us from messing with this variable while an init |
1052 | may be in progress */ | 1052 | may be in progress */ |
1053 | spinlock_lock(&sd_mtx); | 1053 | spinlock_lock(&sd_spin); |
1054 | card_info[1].initialized = 0; | 1054 | card_info[1].initialized = 0; |
1055 | sd_status[1].retry = 0; | 1055 | sd_status[1].retry = 0; |
1056 | spinlock_unlock(&sd_mtx); | ||
1057 | 1056 | ||
1058 | /* Either unmount because the card was pulled or unmount and | 1057 | /* Either unmount because the card was pulled or unmount and |
1059 | remount if already mounted since multiple messages may be | 1058 | remount if already mounted since multiple messages may be |
@@ -1073,6 +1072,8 @@ static void sd_thread(void) | |||
1073 | 1072 | ||
1074 | if (action != SDA_NONE) | 1073 | if (action != SDA_NONE) |
1075 | queue_broadcast(SYS_FS_CHANGED, 0); | 1074 | queue_broadcast(SYS_FS_CHANGED, 0); |
1075 | |||
1076 | spinlock_unlock(&sd_spin); | ||
1076 | break; | 1077 | break; |
1077 | } /* SD_HOTSWAP */ | 1078 | } /* SD_HOTSWAP */ |
1078 | #endif /* HAVE_HOTSWAP */ | 1079 | #endif /* HAVE_HOTSWAP */ |
@@ -1155,9 +1156,9 @@ int ata_init(void) | |||
1155 | { | 1156 | { |
1156 | initialized = true; | 1157 | initialized = true; |
1157 | 1158 | ||
1158 | spinlock_init(&sd_mtx); | 1159 | spinlock_init(&sd_spin IF_COP(, SPINLOCK_TASK_SWITCH)); |
1159 | 1160 | ||
1160 | spinlock_lock(&sd_mtx); | 1161 | spinlock_lock(&sd_spin); |
1161 | 1162 | ||
1162 | /* init controller */ | 1163 | /* init controller */ |
1163 | outl(inl(0x70000088) & ~(0x4), 0x70000088); | 1164 | outl(inl(0x70000088) & ~(0x4), 0x70000088); |
@@ -1181,8 +1182,8 @@ int ata_init(void) | |||
1181 | ret = currcard->initialized; | 1182 | ret = currcard->initialized; |
1182 | 1183 | ||
1183 | queue_init(&sd_queue, true); | 1184 | queue_init(&sd_queue, true); |
1184 | create_thread(sd_thread, sd_stack, sizeof(sd_stack), | 1185 | create_thread(sd_thread, sd_stack, sizeof(sd_stack), 0, |
1185 | sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU, false)); | 1186 | sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU)); |
1186 | 1187 | ||
1187 | /* enable interrupt for the mSD card */ | 1188 | /* enable interrupt for the mSD card */ |
1188 | sleep(HZ/10); | 1189 | sleep(HZ/10); |
@@ -1195,7 +1196,7 @@ int ata_init(void) | |||
1195 | GPIOA_INT_CLR = 0x80; | 1196 | GPIOA_INT_CLR = 0x80; |
1196 | GPIOA_INT_EN |= 0x80; | 1197 | GPIOA_INT_EN |= 0x80; |
1197 | #endif | 1198 | #endif |
1198 | spinlock_unlock(&sd_mtx); | 1199 | spinlock_unlock(&sd_spin); |
1199 | } | 1200 | } |
1200 | 1201 | ||
1201 | return ret; | 1202 | return ret; |
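
One behavioral change hides in the hotswap hunk: sd_spin is no longer released right after the card state is cleared. It stays held through the unmount/remount and the SYS_FS_CHANGED broadcast, so ata_read_sectors()/ata_write_sectors() on the other core cannot slip in against a half-reinitialized card. Condensed from the hunks above, the critical section now has this shape:

    spinlock_lock(&sd_spin);
    card_info[1].initialized = 0;      /* invalidate the card state */
    sd_status[1].retry = 0;
    /* ... unmount; re-init and remount if the card is still present ... */
    if (action != SDA_NONE)
        queue_broadcast(SYS_FS_CHANGED, 0);
    spinlock_unlock(&sd_spin);         /* readers and writers may proceed */
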
diff --git a/firmware/target/arm/system-pp502x.c b/firmware/target/arm/system-pp502x.c index 576459d6c1..d24d19f747 100644 --- a/firmware/target/arm/system-pp502x.c +++ b/firmware/target/arm/system-pp502x.c | |||
@@ -21,10 +21,6 @@ | |||
21 | #include "i2s.h" | 21 | #include "i2s.h" |
22 | #include "i2c-pp.h" | 22 | #include "i2c-pp.h" |
23 | 23 | ||
24 | #if NUM_CORES > 1 | ||
25 | struct mutex boostctrl_mtx NOCACHEBSS_ATTR; | ||
26 | #endif | ||
27 | |||
28 | #ifndef BOOTLOADER | 24 | #ifndef BOOTLOADER |
29 | extern void TIMER1(void); | 25 | extern void TIMER1(void); |
30 | extern void TIMER2(void); | 26 | extern void TIMER2(void); |
@@ -129,16 +125,42 @@ static void init_cache(void) | |||
129 | } | 125 | } |
130 | 126 | ||
131 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ | 127 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ |
128 | void scale_suspend_core(bool suspend) ICODE_ATTR; | ||
129 | void scale_suspend_core(bool suspend) | ||
130 | { | ||
131 | unsigned int core = CURRENT_CORE; | ||
132 | unsigned int othercore = 1 - core; | ||
133 | static unsigned long proc_bits IBSS_ATTR; | ||
134 | static int oldstatus IBSS_ATTR; | ||
135 | |||
136 | if (suspend) | ||
137 | { | ||
138 | oldstatus = set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS); | ||
139 | proc_bits = PROC_CTL(othercore) & 0xc0000000; | ||
140 | PROC_CTL(othercore) = 0x40000000; nop; | ||
141 | PROC_CTL(core) = 0x48000003; nop; | ||
142 | } | ||
143 | else | ||
144 | { | ||
145 | PROC_CTL(core) = 0x4800001f; nop; | ||
146 | if (proc_bits == 0) | ||
147 | PROC_CTL(othercore) = 0; | ||
148 | set_interrupt_status(oldstatus, IRQ_FIQ_STATUS); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | void set_cpu_frequency(long frequency) ICODE_ATTR; | ||
132 | void set_cpu_frequency(long frequency) | 153 | void set_cpu_frequency(long frequency) |
133 | #else | 154 | #else |
134 | static void pp_set_cpu_frequency(long frequency) | 155 | static void pp_set_cpu_frequency(long frequency) |
135 | #endif | 156 | #endif |
136 | { | 157 | { |
137 | #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1) | 158 | #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1) |
138 | /* Using mutex or spinlock isn't safe here. */ | 159 | spinlock_lock(&boostctrl_spin); |
139 | while (test_and_set(&boostctrl_mtx.locked, 1)) ; | ||
140 | #endif | 160 | #endif |
141 | 161 | ||
162 | scale_suspend_core(true); | ||
163 | |||
142 | cpu_frequency = frequency; | 164 | cpu_frequency = frequency; |
143 | 165 | ||
144 | switch (frequency) | 166 | switch (frequency) |
@@ -149,17 +171,20 @@ static void pp_set_cpu_frequency(long frequency) | |||
149 | * have this limitation (and the post divider?) */ | 171 | * have this limitation (and the post divider?) */ |
150 | case CPUFREQ_MAX: | 172 | case CPUFREQ_MAX: |
151 | CLOCK_SOURCE = 0x10007772; /* source #1: 24MHz, #2, #3, #4: PLL */ | 173 | CLOCK_SOURCE = 0x10007772; /* source #1: 24MHz, #2, #3, #4: PLL */ |
152 | DEV_TIMING1 = 0x00000808; | 174 | DEV_TIMING1 = 0x00000303; |
153 | #if CONFIG_CPU == PP5020 | 175 | #if CONFIG_CPU == PP5020 |
154 | PLL_CONTROL = 0x8a020a03; /* 10/3 * 24MHz */ | 176 | PLL_CONTROL = 0x8a020a03; /* 10/3 * 24MHz */ |
155 | PLL_STATUS = 0xd19b; /* unlock frequencies > 66MHz */ | 177 | PLL_STATUS = 0xd19b; /* unlock frequencies > 66MHz */ |
156 | PLL_CONTROL = 0x8a020a03; /* repeat setup */ | 178 | PLL_CONTROL = 0x8a020a03; /* repeat setup */ |
179 | scale_suspend_core(false); | ||
157 | udelay(500); /* wait for relock */ | 180 | udelay(500); /* wait for relock */ |
158 | #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024) | 181 | #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024) |
159 | PLL_CONTROL = 0x8a121403; /* (20/3 * 24MHz) / 2 */ | 182 | PLL_CONTROL = 0x8a121403; /* (20/3 * 24MHz) / 2 */ |
183 | scale_suspend_core(false); | ||
160 | udelay(250); | 184 | udelay(250); |
161 | while (!(PLL_STATUS & 0x80000000)); /* wait for relock */ | 185 | while (!(PLL_STATUS & 0x80000000)); /* wait for relock */ |
162 | #endif | 186 | #endif |
187 | scale_suspend_core(true); | ||
163 | break; | 188 | break; |
164 | 189 | ||
165 | case CPUFREQ_NORMAL: | 190 | case CPUFREQ_NORMAL: |
@@ -167,18 +192,23 @@ static void pp_set_cpu_frequency(long frequency) | |||
167 | DEV_TIMING1 = 0x00000303; | 192 | DEV_TIMING1 = 0x00000303; |
168 | #if CONFIG_CPU == PP5020 | 193 | #if CONFIG_CPU == PP5020 |
169 | PLL_CONTROL = 0x8a020504; /* 5/4 * 24MHz */ | 194 | PLL_CONTROL = 0x8a020504; /* 5/4 * 24MHz */ |
195 | scale_suspend_core(false); | ||
170 | udelay(500); /* wait for relock */ | 196 | udelay(500); /* wait for relock */ |
171 | #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024) | 197 | #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024) |
172 | PLL_CONTROL = 0x8a220501; /* (5/1 * 24MHz) / 4 */ | 198 | PLL_CONTROL = 0x8a220501; /* (5/1 * 24MHz) / 4 */ |
199 | scale_suspend_core(false); | ||
173 | udelay(250); | 200 | udelay(250); |
174 | while (!(PLL_STATUS & 0x80000000)); /* wait for relock */ | 201 | while (!(PLL_STATUS & 0x80000000)); /* wait for relock */ |
175 | #endif | 202 | #endif |
203 | scale_suspend_core(true); | ||
176 | break; | 204 | break; |
177 | 205 | ||
178 | case CPUFREQ_SLEEP: | 206 | case CPUFREQ_SLEEP: |
179 | CLOCK_SOURCE = 0x10002202; /* source #2: 32kHz, #1, #3, #4: 24MHz */ | 207 | CLOCK_SOURCE = 0x10002202; /* source #2: 32kHz, #1, #3, #4: 24MHz */ |
180 | PLL_CONTROL &= ~0x80000000; /* disable PLL */ | 208 | PLL_CONTROL &= ~0x80000000; /* disable PLL */ |
209 | scale_suspend_core(false); | ||
181 | udelay(10000); /* let 32kHz source stabilize? */ | 210 | udelay(10000); /* let 32kHz source stabilize? */ |
211 | scale_suspend_core(true); | ||
182 | break; | 212 | break; |
183 | 213 | ||
184 | default: | 214 | default: |
@@ -186,12 +216,19 @@ static void pp_set_cpu_frequency(long frequency) | |||
186 | DEV_TIMING1 = 0x00000303; | 216 | DEV_TIMING1 = 0x00000303; |
187 | PLL_CONTROL &= ~0x80000000; /* disable PLL */ | 217 | PLL_CONTROL &= ~0x80000000; /* disable PLL */ |
188 | cpu_frequency = CPUFREQ_DEFAULT; | 218 | cpu_frequency = CPUFREQ_DEFAULT; |
219 | PROC_CTL(CURRENT_CORE) = 0x4800001f; nop; | ||
189 | break; | 220 | break; |
190 | } | 221 | } |
222 | |||
223 | if (frequency == CPUFREQ_MAX) | ||
224 | DEV_TIMING1 = 0x00000808; | ||
225 | |||
191 | CLOCK_SOURCE = (CLOCK_SOURCE & ~0xf0000000) | 0x20000000; /* select source #2 */ | 226 | CLOCK_SOURCE = (CLOCK_SOURCE & ~0xf0000000) | 0x20000000; /* select source #2 */ |
192 | 227 | ||
228 | scale_suspend_core(false); | ||
229 | |||
193 | #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1) | 230 | #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1) |
194 | boostctrl_mtx.locked = 0; | 231 | spinlock_unlock(&boostctrl_spin); |
195 | #endif | 232 | #endif |
196 | } | 233 | } |
197 | #endif /* !BOOTLOADER */ | 234 | #endif /* !BOOTLOADER */ |
@@ -256,7 +293,7 @@ void system_init(void) | |||
256 | 293 | ||
257 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ | 294 | #ifdef HAVE_ADJUSTABLE_CPU_FREQ |
258 | #if NUM_CORES > 1 | 295 | #if NUM_CORES > 1 |
259 | spinlock_init(&boostctrl_mtx); | 296 | cpu_boost_init(); |
260 | #endif | 297 | #endif |
261 | #else | 298 | #else |
262 | pp_set_cpu_frequency(CPUFREQ_MAX); | 299 | pp_set_cpu_frequency(CPUFREQ_MAX); |
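
scale_suspend_core() brackets only the instants when the clock source or PLL is actually in flux: on entry it masks IRQ/FIQ, parks the other core and throttles the current one; on exit it restores both. The relock waits deliberately run between brackets with the cores awake. The sequence in outline, reusing the PP5022 CPUFREQ_MAX values from the hunk above:

    /* Sketch of the bracketing pattern inside set_cpu_frequency();
     * register writes and constants are from the PP5022 branch above. */
    void freq_change_outline(void)
    {
    #if NUM_CORES > 1
        spinlock_lock(&boostctrl_spin);
    #endif
        scale_suspend_core(true);             /* IRQ/FIQ off, other core parked */
        CLOCK_SOURCE = 0x10007772;            /* reprogram while cores are quiet */
        PLL_CONTROL  = 0x8a121403;
        scale_suspend_core(false);            /* resume so the relock wait can run */
        udelay(250);
        while (!(PLL_STATUS & 0x80000000));   /* wait for PLL relock */
        scale_suspend_core(true);             /* quiet again for the final switch */
        CLOCK_SOURCE = (CLOCK_SOURCE & ~0xf0000000) | 0x20000000;
        scale_suspend_core(false);
    #if NUM_CORES > 1
        spinlock_unlock(&boostctrl_spin);
    #endif
    }
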
diff --git a/firmware/target/arm/system-target.h b/firmware/target/arm/system-target.h index 7a1ff4f79a..6e433be9d5 100644 --- a/firmware/target/arm/system-target.h +++ b/firmware/target/arm/system-target.h | |||
@@ -46,6 +46,10 @@ | |||
46 | #define inw(a) (*(volatile unsigned short *) (a)) | 46 | #define inw(a) (*(volatile unsigned short *) (a)) |
47 | #define outw(a,b) (*(volatile unsigned short *) (b) = (a)) | 47 | #define outw(a,b) (*(volatile unsigned short *) (b) = (a)) |
48 | 48 | ||
49 | #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && NUM_CORES > 1 | ||
50 | extern struct spinlock boostctrl_spin; | ||
51 | #endif | ||
52 | |||
49 | static inline void udelay(unsigned usecs) | 53 | static inline void udelay(unsigned usecs) |
50 | { | 54 | { |
51 | unsigned stop = USEC_TIMER + usecs; | 55 | unsigned stop = USEC_TIMER + usecs; |
@@ -107,7 +111,6 @@ void flush_icache(void); | |||
107 | 111 | ||
108 | #endif /* CPU_PP502x */ | 112 | #endif /* CPU_PP502x */ |
109 | 113 | ||
110 | |||
111 | #endif /* CPU_PP */ | 114 | #endif /* CPU_PP */ |
112 | 115 | ||
113 | #endif /* SYSTEM_TARGET_H */ | 116 | #endif /* SYSTEM_TARGET_H */ |
diff --git a/firmware/test/i2c/main.c b/firmware/test/i2c/main.c index 88aa9151d6..0d54da5dea 100644 --- a/firmware/test/i2c/main.c +++ b/firmware/test/i2c/main.c | |||
@@ -708,7 +708,7 @@ int main(void) | |||
708 | 708 | ||
709 | 709 | ||
710 | 710 | ||
711 | create_thread(mpeg_thread, stack - 0x2000, 0x4000); | 711 | create_thread(mpeg_thread, stack - 0x2000, 0x4000, 0); |
712 | 712 | ||
713 | 713 | ||
714 | 714 | ||
@@ -1004,7 +1004,7 @@ void mpeg_thread(void) | |||
1004 | 1004 | ||
1005 | { | 1005 | { |
1006 | 1006 | ||
1007 | struct event ev; | 1007 | struct queue_event ev; |
1008 | 1008 | ||
1009 | int len; | 1009 | int len; |
1010 | 1010 | ||
diff --git a/firmware/test/kernel/main.c b/firmware/test/kernel/main.c index b651324ae1..99642c867d 100644 --- a/firmware/test/kernel/main.c +++ b/firmware/test/kernel/main.c | |||
@@ -44,7 +44,7 @@ int main(void) | |||
44 | char buf[40]; | 44 | char buf[40]; |
45 | char str[32]; | 45 | char str[32]; |
46 | int i=0; | 46 | int i=0; |
47 | struct event *ev; | 47 | struct queue_event *ev; |
48 | 48 | ||
49 | /* Clear it all! */ | 49 | /* Clear it all! */ |
50 | SSR1 &= ~(SCI_RDRF | SCI_ORER | SCI_PER | SCI_FER); | 50 | SSR1 &= ~(SCI_RDRF | SCI_ORER | SCI_PER | SCI_FER); |
@@ -69,8 +69,8 @@ int main(void) | |||
69 | 69 | ||
70 | queue_init(&main_q); | 70 | queue_init(&main_q); |
71 | 71 | ||
72 | create_thread(t1, s1, 1024); | 72 | create_thread(t1, s1, 1024, 0); |
73 | create_thread(t2, s2, 1024); | 73 | create_thread(t2, s2, 1024, 0); |
74 | 74 | ||
75 | while(1) | 75 | while(1) |
76 | { | 76 | { |
diff --git a/firmware/thread.c b/firmware/thread.c index 619a1e135a..c9ce049ea1 100644 --- a/firmware/thread.c +++ b/firmware/thread.c | |||
@@ -29,43 +29,150 @@ | |||
29 | #include <profile.h> | 29 | #include <profile.h> |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #if NUM_CORES > 1 | 32 | /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ |
33 | # define IF_COP2(x) x | 33 | #ifdef DEBUG |
34 | #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */ | ||
34 | #else | 35 | #else |
35 | # define IF_COP2(x) CURRENT_CORE | 36 | #define THREAD_EXTRA_CHECKS 0 |
36 | #endif | 37 | #endif |
37 | 38 | ||
39 | /** | ||
40 | * General locking order to guarantee progress. Order must be observed but | ||
41 | * not all stages are necessarily obligatory. Going from 1) to 3) is | ||
42 | * perfectly legal. | ||
43 | * | ||
44 | * 1) IRQ | ||
45 | * This is first because of the likelihood of having an interrupt occur that | ||
46 | * also accesses one of the objects farther down the list. Any non-blocking | ||
47 | * synchronization done may already have a lock on something during normal | ||
48 | * execution and if an interrupt handler running on the same processor as | ||
49 | * the one that has the resource locked were to attempt to access the | ||
50 | * resource, the interrupt handler would wait forever for an unlock | ||
51 | * that will never happen. There is no danger if the interrupt occurs on | ||
52 | * a different processor because the one that has the lock will eventually | ||
53 | * unlock and the other processor's handler may proceed at that time. Not | ||
54 | * necessary when the resource in question is definitely not available to | ||
55 | * interrupt handlers. | ||
56 | * | ||
57 | * 2) Kernel Object | ||
58 | * Level 1) may be needed beforehand if the kernel object allows dual-use such as | ||
59 | * event queues. The kernel object must have a scheme to protect itself from | ||
60 | * access by another processor and is responsible for serializing the calls | ||
61 | * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each | ||
62 | * other. If a thread blocks on an object it must fill in the blk_ops members | ||
63 | * for its core to unlock _after_ the thread's context has been saved and the | ||
64 | * unlocking will be done in reverse from this hierarchy. | ||
65 | * | ||
66 | * 3) Thread Slot | ||
67 | * This locks access to the thread's slot such that its state cannot be | ||
68 | * altered by another processor when a state change is in progress such as | ||
69 | * when it is in the process of going on a blocked list. An attempt to wake | ||
70 | * a thread while it is still blocking will likely desync its state with | ||
71 | * the other resources used for that state. | ||
72 | * | ||
73 | * 4) Lists | ||
74 | * Usually referring to a list (aka. queue) that a thread will be blocking | ||
75 | * on that belongs to some object and is shareable amongst multiple | ||
76 | * processors. Parts of the scheduler may have access to them without actually | ||
77 | * locking the kernel object such as when a thread is blocked with a timeout | ||
78 | * (as when calling queue_wait_w_tmo). Of course the kernel object also gets | ||
79 | * its lists locked when the thread blocks so that all object list access is | ||
80 | * synchronized. Failure to do so would corrupt the list links. | ||
81 | * | ||
82 | * 5) Core Lists | ||
83 | * These lists are specific to a particular processor core and are accessible | ||
84 | * by all processor cores and interrupt handlers. They are used when an | ||
85 | * operation may only be performed by the thread's own core in a normal | ||
86 | * execution context. The wakeup list is the prime example where a thread | ||
87 | * may be added by any means and the thread's own core will remove it from | ||
88 | * the wakeup list and put it on the running list (which is only ever | ||
89 | * accessible by its own processor). | ||
90 | */ | ||
38 | #define DEADBEEF ((unsigned int)0xdeadbeef) | 91 | #define DEADBEEF ((unsigned int)0xdeadbeef) |
39 | /* Cast to the machine int type, whose size could be < 4. */ | 92 | /* Cast to the machine int type, whose size could be < 4. */ |
40 | |||
41 | struct core_entry cores[NUM_CORES] IBSS_ATTR; | 93 | struct core_entry cores[NUM_CORES] IBSS_ATTR; |
42 | struct thread_entry threads[MAXTHREADS] IBSS_ATTR; | 94 | struct thread_entry threads[MAXTHREADS] IBSS_ATTR; |
43 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 95 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
44 | static int boosted_threads IBSS_ATTR; | 96 | static int boosted_threads IBSS_ATTR; |
45 | #endif | 97 | #endif |
46 | 98 | ||
47 | /* Define to enable additional checks for blocking violations etc. */ | ||
48 | #define THREAD_EXTRA_CHECKS 0 | ||
49 | |||
50 | static const char main_thread_name[] = "main"; | 99 | static const char main_thread_name[] = "main"; |
51 | |||
52 | extern int stackbegin[]; | 100 | extern int stackbegin[]; |
53 | extern int stackend[]; | 101 | extern int stackend[]; |
54 | 102 | ||
55 | /* Conserve IRAM | 103 | /* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup |
56 | static void add_to_list(struct thread_entry **list, | 104 | * never results in requiring a wait until the next tick (up to 10000uS!). Likely |
57 | struct thread_entry *thread) ICODE_ATTR; | 105 | * requires assembly and careful instruction ordering. Multicore requires |
58 | static void remove_from_list(struct thread_entry **list, | 106 | * carefully timed sections in order to have synchronization without locking of |
59 | struct thread_entry *thread) ICODE_ATTR; | 107 | * any sort. |
60 | */ | 108 | * |
109 | * 1) Disable all interrupts (FIQ and IRQ for ARM for instance) | ||
110 | * 2) Check *waking == NULL. | ||
111 | * 3) *waking not NULL? Goto step 7. | ||
112 | * 4) On multicore, stay awake if directed to do so by another. If so, goto step 7. | ||
113 | * 5) If processor requires, atomically reenable interrupts and perform step 6. | ||
114 | * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire) | ||
115 | * goto step 8. | ||
116 | * 7) Reenable interrupts. | ||
117 | * 8) Exit procedure. | ||
118 | */ | ||
119 | static inline void core_sleep( | ||
120 | IF_COP(unsigned int core,) struct thread_entry **waking) | ||
121 | __attribute__((always_inline)); | ||
122 | |||
123 | static void check_tmo_threads(void) | ||
124 | __attribute__((noinline)); | ||
125 | |||
126 | static inline void block_thread_on_l( | ||
127 | struct thread_queue *list, struct thread_entry *thread, unsigned state) | ||
128 | __attribute__((always_inline)); | ||
129 | |||
130 | static inline void block_thread_on_l_no_listlock( | ||
131 | struct thread_entry **list, struct thread_entry *thread, unsigned state) | ||
132 | __attribute__((always_inline)); | ||
133 | |||
134 | static inline void _block_thread_on_l( | ||
135 | struct thread_queue *list, struct thread_entry *thread, | ||
136 | unsigned state IF_SWCL(, const bool single)) | ||
137 | __attribute__((always_inline)); | ||
138 | |||
139 | IF_SWCL(static inline) struct thread_entry * _wakeup_thread( | ||
140 | struct thread_queue *list IF_SWCL(, const bool nolock)) | ||
141 | __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline))); | ||
142 | |||
143 | IF_SWCL(static inline) void _block_thread( | ||
144 | struct thread_queue *list IF_SWCL(, const bool nolock)) | ||
145 | __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline))); | ||
146 | |||
147 | static void add_to_list_tmo(struct thread_entry *thread) | ||
148 | __attribute__((noinline)); | ||
149 | |||
150 | static void core_schedule_wakeup(struct thread_entry *thread) | ||
151 | __attribute__((noinline)); | ||
152 | |||
153 | static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core)) | ||
154 | __attribute__((always_inline)); | ||
155 | |||
156 | static inline void run_blocking_ops( | ||
157 | IF_COP_VOID(unsigned int core, struct thread_entry *thread)) | ||
158 | __attribute__((always_inline)); | ||
159 | |||
160 | static void thread_stkov(struct thread_entry *thread) | ||
161 | __attribute__((noinline)); | ||
61 | 162 | ||
62 | void switch_thread(bool save_context, struct thread_entry **blocked_list) | 163 | static inline void store_context(void* addr) |
63 | ICODE_ATTR; | 164 | __attribute__((always_inline)); |
64 | 165 | ||
65 | static inline void store_context(void* addr) __attribute__ ((always_inline)); | ||
66 | static inline void load_context(const void* addr) | 166 | static inline void load_context(const void* addr) |
67 | __attribute__ ((always_inline)); | 167 | __attribute__((always_inline)); |
68 | static inline void core_sleep(void) __attribute__((always_inline)); | 168 | |
169 | void switch_thread(struct thread_entry *old) | ||
170 | __attribute__((noinline)); | ||
171 | |||
172 | |||
173 | /**************************************************************************** | ||
174 | * Processor-specific section | ||
175 | */ | ||
69 | 176 | ||
70 | #if defined(CPU_ARM) | 177 | #if defined(CPU_ARM) |
71 | /*--------------------------------------------------------------------------- | 178 | /*--------------------------------------------------------------------------- |
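
A concrete instance makes the five levels above easier to follow: waking one thread blocked on a shared object from normal (non-interrupt) context touches every stage in order and unlocks in reverse. The primitives below are hypothetical stand-ins declared only for the sketch; the real ones are the corelock and thread-state macros defined further down this file.

    struct demo_thread { struct demo_thread *next; };
    struct demo_queue  { struct demo_thread *waiters; /* plus its own lock */ };

    extern int  disable_irq_save(void);
    extern void restore_irq(int level);
    extern void object_lock(struct demo_queue *q);
    extern void object_unlock(struct demo_queue *q);
    extern void thread_lock(struct demo_thread *t);
    extern void thread_unlock(struct demo_thread *t);
    extern void list_remove(struct demo_thread **list, struct demo_thread *t);
    extern void core_wakeup_list_add(struct demo_thread *t);

    static void wake_one(struct demo_queue *q)
    {
        struct demo_thread *t;
        int oldlevel = disable_irq_save();  /* 1) IRQ: handlers touch this too   */
        object_lock(q);                     /* 2) kernel object owning the list  */
        t = q->waiters;
        if (t != NULL)
        {
            thread_lock(t);                 /* 3) thread slot: freeze its state  */
            list_remove(&q->waiters, t);    /* 4) the object's blocked list      */
            core_wakeup_list_add(t);        /* 5) destination core's wakeup list */
            thread_unlock(t);               /* unlock in reverse order           */
        }
        object_unlock(q);
        restore_irq(oldlevel);
    }
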
@@ -94,6 +201,14 @@ static void start_thread(void) | |||
94 | ); /* No clobber list - new thread doesn't care */ | 201 | ); /* No clobber list - new thread doesn't care */ |
95 | } | 202 | } |
96 | 203 | ||
204 | /* For startup, place context pointer in r4 slot, start_thread pointer in r5 | ||
205 | * slot, and thread function pointer in context.start. See load_context for | ||
206 | * what happens when thread is initially going to run. */ | ||
207 | #define THREAD_STARTUP_INIT(core, thread, function) \ | ||
208 | ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ | ||
209 | (thread)->context.r[1] = (unsigned int)start_thread, \ | ||
210 | (thread)->context.start = (void *)function; }) | ||
211 | |||
97 | /*--------------------------------------------------------------------------- | 212 | /*--------------------------------------------------------------------------- |
98 | * Store non-volatile context. | 213 | * Store non-volatile context. |
99 | *--------------------------------------------------------------------------- | 214 | *--------------------------------------------------------------------------- |
@@ -106,14 +221,10 @@ static inline void store_context(void* addr) | |||
106 | ); | 221 | ); |
107 | } | 222 | } |
108 | 223 | ||
109 | /* For startup, place context pointer in r4 slot, start_thread pointer in r5 | 224 | /*--------------------------------------------------------------------------- |
110 | * slot, and thread function pointer in context.start. See load_context for | 225 | * Load non-volatile context. |
111 | * what happens when thread is initially going to run. */ | 226 | *--------------------------------------------------------------------------- |
112 | #define THREAD_STARTUP_INIT(core, thread, function) \ | 227 | */ |
113 | ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ | ||
114 | (thread)->context.r[1] = (unsigned int)start_thread, \ | ||
115 | (thread)->context.start = (void *)function; }) | ||
116 | |||
117 | static inline void load_context(const void* addr) | 228 | static inline void load_context(const void* addr) |
118 | { | 229 | { |
119 | asm volatile( | 230 | asm volatile( |
@@ -139,14 +250,226 @@ static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR = | |||
139 | }; | 250 | }; |
140 | #endif /* NUM_CORES */ | 251 | #endif /* NUM_CORES */ |
141 | 252 | ||
142 | static inline void core_sleep(void) | 253 | #if CONFIG_CORELOCK == SW_CORELOCK |
254 | /* Software core locks using Peterson's mutual exclusion algorithm */ | ||
255 | |||
256 | /*--------------------------------------------------------------------------- | ||
257 | * Initialize the corelock structure. | ||
258 | *--------------------------------------------------------------------------- | ||
259 | */ | ||
260 | void corelock_init(struct corelock *cl) | ||
143 | { | 261 | { |
144 | /* This should sleep the CPU. It appears to wake by itself on | 262 | memset(cl, 0, sizeof (*cl)); |
145 | interrupts */ | 263 | } |
146 | if (CURRENT_CORE == CPU) | 264 | |
147 | CPU_CTL = PROC_SLEEP; | 265 | #if 1 /* Assembly locks to minimize overhead */ |
148 | else | 266 | /*--------------------------------------------------------------------------- |
149 | COP_CTL = PROC_SLEEP; | 267 | * Wait for the corelock to become free and acquire it when it does. |
268 | *--------------------------------------------------------------------------- | ||
269 | */ | ||
270 | void corelock_lock(struct corelock *cl) __attribute__((naked)); | ||
271 | void corelock_lock(struct corelock *cl) | ||
272 | { | ||
273 | asm volatile ( | ||
274 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
275 | "ldrb r1, [r1] \n" | ||
276 | "mov r3, #1 \n" /* cl->myl[core] = 1 */ | ||
277 | "strb r3, [r0, r1, lsr #7] \n" | ||
278 | "and r2, r1, #1 \n" /* r2 = othercore */ | ||
279 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
280 | "1: \n" | ||
281 | "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */ | ||
282 | "cmp r3, #1 \n" | ||
283 | "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore ? */ | ||
284 | "cmpeq r3, r2 \n" | ||
285 | "bxne lr \n" /* no? lock acquired */ | ||
286 | "b 1b \n" /* keep trying */ | ||
287 | : : "i"(&PROCESSOR_ID) | ||
288 | ); | ||
289 | (void)cl; | ||
290 | } | ||
291 | |||
292 | /*--------------------------------------------------------------------------- | ||
293 | * Try to acquire the corelock. If free, caller gets it, otherwise return 0. | ||
294 | *--------------------------------------------------------------------------- | ||
295 | */ | ||
296 | int corelock_try_lock(struct corelock *cl) __attribute__((naked)); | ||
297 | int corelock_try_lock(struct corelock *cl) | ||
298 | { | ||
299 | asm volatile ( | ||
300 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
301 | "ldrb r1, [r1] \n" | ||
302 | "mov r3, #1 \n" /* cl->myl[core] = 1 */ | ||
303 | "strb r3, [r0, r1, lsr #7] \n" | ||
304 | "and r2, r1, #1 \n" /* r2 = othercore */ | ||
305 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
306 | "1: \n" | ||
307 | "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */ | ||
308 | "cmp r3, #1 \n" | ||
309 | "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore? */ | ||
310 | "cmpeq r3, r2 \n" | ||
311 | "movne r0, #1 \n" /* no? lock acquired */ | ||
312 | "bxne lr \n" | ||
313 | "mov r2, #0 \n" /* cl->myl[core] = 0 */ | ||
314 | "strb r2, [r0, r1, lsr #7] \n" | ||
315 | "mov r0, r2 \n" | ||
316 | "bx lr \n" /* acquisition failed */ | ||
317 | : : "i"(&PROCESSOR_ID) | ||
318 | ); | ||
319 | |||
320 | return 0; | ||
321 | (void)cl; | ||
322 | } | ||
323 | |||
324 | /*--------------------------------------------------------------------------- | ||
325 | * Release ownership of the corelock | ||
326 | *--------------------------------------------------------------------------- | ||
327 | */ | ||
328 | void corelock_unlock(struct corelock *cl) __attribute__((naked)); | ||
329 | void corelock_unlock(struct corelock *cl) | ||
330 | { | ||
331 | asm volatile ( | ||
332 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
333 | "ldrb r1, [r1] \n" | ||
334 | "mov r2, #0 \n" /* cl->myl[core] = 0 */ | ||
335 | "strb r2, [r0, r1, lsr #7] \n" | ||
336 | "bx lr \n" | ||
337 | : : "i"(&PROCESSOR_ID) | ||
338 | ); | ||
339 | (void)cl; | ||
340 | } | ||
341 | #else /* C versions for reference */ | ||
342 | /*--------------------------------------------------------------------------- | ||
343 | * Wait for the corelock to become free and acquire it when it does. | ||
344 | *--------------------------------------------------------------------------- | ||
345 | */ | ||
346 | void corelock_lock(struct corelock *cl) | ||
347 | { | ||
348 | const unsigned int core = CURRENT_CORE; | ||
349 | const unsigned int othercore = 1 - core; | ||
350 | |||
351 | cl->myl[core] = 1; | ||
352 | cl->turn = othercore; | ||
353 | |||
354 | while (cl->myl[othercore] == 1 && cl->turn == othercore); | ||
355 | } | ||
356 | |||
357 | /*--------------------------------------------------------------------------- | ||
358 | * Try to acquire the corelock. If free, caller gets it, otherwise return 0. | ||
359 | *--------------------------------------------------------------------------- | ||
360 | */ | ||
361 | int corelock_try_lock(struct corelock *cl) | ||
362 | { | ||
363 | const unsigned int core = CURRENT_CORE; | ||
364 | const unsigned int othercore = 1 - core; | ||
365 | |||
366 | cl->myl[core] = 1; | ||
367 | cl->turn = othercore; | ||
368 | |||
369 | if (cl->myl[othercore] == 1 && cl->turn == othercore) | ||
370 | { | ||
371 | cl->myl[core] = 0; | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | return 1; | ||
376 | } | ||
377 | |||
378 | /*--------------------------------------------------------------------------- | ||
379 | * Release ownership of the corelock | ||
380 | *--------------------------------------------------------------------------- | ||
381 | */ | ||
382 | void corelock_unlock(struct corelock *cl) | ||
383 | { | ||
384 | cl->myl[CURRENT_CORE] = 0; | ||
385 | } | ||
386 | #endif /* ASM / C selection */ | ||
387 | |||
388 | #endif /* CONFIG_CORELOCK == SW_CORELOCK */ | ||
389 | |||
390 | /*--------------------------------------------------------------------------- | ||
391 | * Put core in a power-saving state if waking list wasn't repopulated and if | ||
392 | * no other core requested a wakeup for it to perform a task. | ||
393 | *--------------------------------------------------------------------------- | ||
394 | */ | ||
395 | static inline void core_sleep(IF_COP(unsigned int core,) struct thread_entry **waking) | ||
396 | { | ||
397 | #if NUM_CORES > 1 | ||
398 | #ifdef CPU_PP502x | ||
399 | /* Disabling IRQ and FIQ is important to making the fixed-time sequence | ||
400 | * non-interruptable */ | ||
401 | asm volatile ( | ||
402 | "mrs r2, cpsr \n" /* Disable IRQ, FIQ */ | ||
403 | "orr r2, r2, #0xc0 \n" | ||
404 | "msr cpsr_c, r2 \n" | ||
405 | "ldr r0, [%[w]] \n" /* Check *waking */ | ||
406 | "cmp r0, #0 \n" /* != NULL -> exit */ | ||
407 | "bne 1f \n" | ||
408 | /* ------ fixed-time sequence ----- */ | ||
409 | "ldr r0, [%[ms], %[oc], lsl #2] \n" /* Stay-awake requested? */ | ||
410 | "mov r1, #0x80000000 \n" | ||
411 | "tst r0, #1 \n" | ||
412 | "streq r1, [%[ct], %[c], lsl #2] \n" /* Sleep if not */ | ||
413 | "nop \n" | ||
414 | "mov r0, #0 \n" | ||
415 | "str r0, [%[ct], %[c], lsl #2] \n" /* Clear control reg */ | ||
416 | /* -------------------------------- */ | ||
417 | "1: \n" | ||
418 | "mov r0, #1 \n" | ||
419 | "add r1, %[ms], #8 \n" | ||
420 | "str r0, [r1, %[oc], lsl #2] \n" /* Clear mailbox */ | ||
421 | "bic r2, r2, #0xc0 \n" /* Enable interrupts */ | ||
422 | "msr cpsr_c, r2 \n" | ||
423 | : | ||
424 | : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)), | ||
425 | [c]"r" (core), [oc]"r"(1-core), [w]"r"(waking) | ||
426 | : "r0", "r1", "r2"); | ||
427 | #else | ||
428 | /* TODO: PP5002 */ | ||
429 | #endif /* CONFIG_CPU == */ | ||
430 | #else | ||
431 | set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS); | ||
432 | if (*waking == NULL) | ||
433 | { | ||
434 | PROC_CTL(IF_COP_CORE(core)) = PROC_SLEEP; | ||
435 | } | ||
436 | set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); | ||
437 | #endif /* NUM_CORES */ | ||
438 | } | ||
439 | |||
440 | /*--------------------------------------------------------------------------- | ||
441 | * Wake another processor core that is sleeping, or prevent it from doing so | ||
442 | * if it is about to. FIQ and IRQ should be disabled before calling. | ||
443 | *--------------------------------------------------------------------------- | ||
444 | */ | ||
445 | void core_wake(IF_COP_VOID(unsigned int othercore)) | ||
446 | { | ||
447 | #if NUM_CORES == 1 | ||
448 | /* No wakey - core already wakey */ | ||
449 | #elif defined (CPU_PP502x) | ||
450 | /* avoid r0 since that contains othercore */ | ||
451 | asm volatile ( | ||
452 | "mrs r2, cpsr \n" | ||
453 | "orr r1, r2, #0xc0 \n" | ||
454 | "msr cpsr_c, r1 \n" | ||
455 | "mov r1, #1 \n" | ||
456 | /* ------ fixed-time sequence ----- */ | ||
457 | "str r1, [%[ms], %[oc], lsl #2] \n" /* Send stay-awake message */ | ||
458 | "nop \n" | ||
459 | "nop \n" | ||
460 | "ldr r1, [%[ct], %[oc], lsl #2] \n" /* Wake other core if asleep */ | ||
461 | "tst r1, #0x80000000 \n" | ||
462 | "bic r1, r1, #0x80000000 \n" | ||
463 | "strne r1, [%[ct], %[oc], lsl #2] \n" | ||
464 | /* -------------------------------- */ | ||
465 | "msr cpsr_c, r2 \n" | ||
466 | : | ||
467 | : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)), | ||
468 | [oc]"r" (othercore) | ||
469 | : "r1", "r2"); | ||
470 | #else | ||
471 | PROC_CTL(othercore) = PROC_WAKE; | ||
472 | #endif | ||
150 | } | 473 | } |
151 | 474 | ||
152 | #if NUM_CORES > 1 | 475 | #if NUM_CORES > 1 |
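
The locks above are Peterson's two-party mutual exclusion: each side raises its flag (myl[core]), cedes the turn, and spins only while the other side both wants the lock and holds the turn, so no atomic swap instruction is needed. A minimal usage sketch of the API as introduced here:

    /* The guarded data must be visible to both cores (uncached/shared). */
    static struct corelock counter_cl;
    static volatile int shared_counter;

    void counter_init(void)
    {
        corelock_init(&counter_cl);       /* clears the flags and the turn */
    }

    void counter_bump(void)
    {
        corelock_lock(&counter_cl);       /* spin until the other core is out */
        shared_counter++;
        corelock_unlock(&counter_cl);
    }

    int counter_try_bump(void)
    {
        if (!corelock_try_lock(&counter_cl))
            return 0;                     /* contended; caller must not block */
        shared_counter++;
        corelock_unlock(&counter_cl);
        return 1;
    }
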
@@ -167,22 +490,120 @@ static inline void switch_to_idle_stack(const unsigned int core) | |||
167 | : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1])); | 490 | : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1])); |
168 | (void)core; | 491 | (void)core; |
169 | } | 492 | } |
493 | |||
494 | /*--------------------------------------------------------------------------- | ||
495 | * Perform core switch steps that need to take place inside switch_thread. | ||
496 | * | ||
497 | * These steps must take place before changing the processor and after | ||
498 | * having entered switch_thread since switch_thread may not do a normal return | ||
499 | * because the stack being used for anything the compiler saved will not belong | ||
500 | * to the thread's destination core and it may have been recycled for other | ||
501 | * purposes by the time a normal context load has taken place. switch_thread | ||
502 | * will also clobber anything stashed in the thread's context or stored in the | ||
503 | * nonvolatile registers if it is saved there before the call since the | ||
504 | * compiler's order of operations cannot be known for certain. | ||
505 | */ | ||
506 | static void core_switch_blk_op(unsigned int core, struct thread_entry *thread) | ||
507 | { | ||
508 | /* Flush our data to ram */ | ||
509 | flush_icache(); | ||
510 | /* Stash thread in r4 slot */ | ||
511 | thread->context.r[0] = (unsigned int)thread; | ||
512 | /* Stash restart address in r5 slot */ | ||
513 | thread->context.r[1] = (unsigned int)thread->context.start; | ||
514 | /* Save sp in context.sp while still running on old core */ | ||
515 | thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1]; | ||
516 | } | ||
517 | |||
518 | /*--------------------------------------------------------------------------- | ||
519 | * Machine-specific helper function for switching the processor a thread is | ||
520 | * running on. Basically, the thread suicides on the departing core and is | ||
521 | * reborn on the destination. Were it not for gcc's misbehavior regarding | ||
522 | * naked functions written in C, where it actually clobbers non-volatile | ||
523 | * registers before the intended prologue code, this would all be much | ||
524 | * simpler. Generic setup is done in switch_core itself. | ||
525 | */ | ||
526 | |||
527 | /*--------------------------------------------------------------------------- | ||
528 | * This actually performs the core switch. | ||
529 | */ | ||
530 | static void switch_thread_core(unsigned int core, struct thread_entry *thread) | ||
531 | __attribute__((naked)); | ||
532 | static void switch_thread_core(unsigned int core, struct thread_entry *thread) | ||
533 | { | ||
534 | /* Pure asm for this because compiler behavior isn't sufficiently predictable. | ||
535 | * Stack access also isn't permitted until restoring the original stack and | ||
536 | * context. */ | ||
537 | asm volatile ( | ||
538 | "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */ | ||
539 | "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */ | ||
540 | "ldr r2, [r2, r0, lsl #2] \n" | ||
541 | "add r2, r2, %0*4 \n" | ||
542 | "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */ | ||
543 | "mov sp, r2 \n" /* switch stacks */ | ||
544 | "adr r2, 1f \n" /* r2 = new core restart address */ | ||
545 | "str r2, [r1, #40] \n" /* thread->context.start = r2 */ | ||
546 | "mov r0, r1 \n" /* switch_thread(thread) */ | ||
547 | "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */ | ||
548 | "1: \n" | ||
549 | "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ | ||
550 | "mov r1, #0 \n" /* Clear start address */ | ||
551 | "str r1, [r0, #40] \n" | ||
552 | "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */ | ||
553 | "mov lr, pc \n" | ||
554 | "bx r0 \n" | ||
555 | "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */ | ||
556 | ".ltorg \n" /* Dump constant pool */ | ||
557 | : : "i"(IDLE_STACK_WORDS) | ||
558 | ); | ||
559 | (void)core; (void)thread; | ||
560 | } | ||
170 | #endif /* NUM_CORES */ | 561 | #endif /* NUM_CORES */ |
171 | 562 | ||
172 | #elif CONFIG_CPU == S3C2440 | 563 | #elif CONFIG_CPU == S3C2440 |
173 | static inline void core_sleep(void) | 564 | |
565 | /*--------------------------------------------------------------------------- | ||
566 | * Put core in a power-saving state if waking list wasn't repopulated. | ||
567 | *--------------------------------------------------------------------------- | ||
568 | */ | ||
569 | static inline void core_sleep(struct thread_entry **waking) | ||
174 | { | 570 | { |
175 | int i; | 571 | /* FIQ also changes the CLKCON register so FIQ must be disabled |
176 | CLKCON |= (1 << 2); /* set IDLE bit */ | 572 | when changing it here */ |
177 | for(i=0; i<10; i++); /* wait for IDLE */ | 573 | asm volatile ( |
178 | CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */ | 574 | "mrs r0, cpsr \n" /* Disable IRQ, FIQ */ |
575 | "orr r0, r0, #0xc0 \n" | ||
576 | "msr cpsr_c, r0 \n" | ||
577 | "ldr r1, [%0] \n" /* Check *waking */ | ||
578 | "cmp r1, #0 \n" | ||
579 | "bne 2f \n" /* != NULL -> exit */ | ||
580 | "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */ | ||
581 | "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */ | ||
582 | "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */ | ||
583 | "orr r2, r2, #4 \n" | ||
584 | "str r2, [r1, #0xc] \n" | ||
585 | "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */ | ||
586 | "mov r3, #0 \n" /* wait for IDLE */ | ||
587 | "1: \n" | ||
588 | "add r3, r3, #1 \n" | ||
589 | "cmp r3, #10 \n" | ||
590 | "bne 1b \n" | ||
591 | "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */ | ||
592 | "msr cpsr_c, r0 \n" | ||
593 | "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */ | ||
594 | "bic r2, r2, #4 \n" | ||
595 | "str r2, [r1, #0xc] \n" | ||
596 | "2: \n" | ||
597 | "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */ | ||
598 | "msr cpsr_c, r0 \n" | ||
599 | : : "r"(waking) : "r0", "r1", "r2", "r3"); | ||
179 | } | 600 | } |
180 | #else | 601 | #else |
181 | static inline void core_sleep(void) | 602 | static inline void core_sleep(void) |
182 | { | 603 | { |
183 | 604 | ||
184 | } | 605 | } |
185 | #endif | 606 | #endif /* CONFIG_CPU == */ |
186 | 607 | ||
187 | #elif defined(CPU_COLDFIRE) | 608 | #elif defined(CPU_COLDFIRE) |
188 | /*--------------------------------------------------------------------------- | 609 | /*--------------------------------------------------------------------------- |
@@ -252,17 +673,28 @@ static inline void load_context(const void* addr) | |||
252 | ); | 673 | ); |
253 | } | 674 | } |
254 | 675 | ||
255 | static inline void core_sleep(void) | 676 | /*--------------------------------------------------------------------------- |
677 | * Put core in a power-saving state if waking list wasn't repopulated. | ||
678 | *--------------------------------------------------------------------------- | ||
679 | */ | ||
680 | static inline void core_sleep(struct thread_entry **waking) | ||
256 | { | 681 | { |
257 | asm volatile ("stop #0x2000"); | 682 | asm volatile ( |
258 | } | 683 | "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */ |
259 | 684 | "lsl.l #8, %%d0 \n" | |
260 | /* Set EMAC unit to fractional mode with saturation for each new thread, | 685 | "move.w %%d0, %%sr \n" |
261 | since that's what'll be the most useful for most things which the dsp | 686 | "tst.l (%0) \n" /* Check *waking */ |
262 | will do. Codecs should still initialize their preferred modes | 687 | "beq.b 1f \n" /* != NULL -> exit */ |
263 | explicitly. */ | 688 | "moveq.l #0x20, %%d0 \n" /* Enable interrupts */ |
264 | #define THREAD_CPU_INIT(core, thread) \ | 689 | "lsl.l #8, %%d0 \n" |
265 | ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; }) | 690 | "move.w %%d0, %%sr \n" |
691 | ".word 0x51fb \n" /* tpf.l - eat stop instruction */ | ||
692 | "1: \n" | ||
693 | "stop #0x2000 \n" /* Supervisor mode, interrupts enabled | ||
694 | upon wakeup */ | ||
695 | : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0" | ||
696 | ); | ||
697 | }; | ||
266 | 698 | ||
267 | #elif CONFIG_CPU == SH7034 | 699 | #elif CONFIG_CPU == SH7034 |
268 | /*--------------------------------------------------------------------------- | 700 | /*--------------------------------------------------------------------------- |
@@ -342,18 +774,37 @@ static inline void load_context(const void* addr) | |||
342 | ); | 774 | ); |
343 | } | 775 | } |
344 | 776 | ||
345 | static inline void core_sleep(void) | 777 | /*--------------------------------------------------------------------------- |
778 | * Put core in a power-saving state if waking list wasn't repopulated. | ||
779 | *--------------------------------------------------------------------------- | ||
780 | */ | ||
781 | static inline void core_sleep(struct thread_entry **waking) | ||
346 | { | 782 | { |
347 | and_b(0x7F, &SBYCR); | 783 | asm volatile ( |
348 | asm volatile ("sleep"); | 784 | "mov %2, r1 \n" /* Disable interrupts */ |
785 | "ldc r1, sr \n" | ||
786 | "mov.l @%1, r1 \n" /* Check *waking */ | ||
787 | "tst r1, r1 \n" | ||
788 | "bf 1f \n" /* *waking != NULL ? exit */ | ||
789 | "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */ | ||
790 | "mov #0, r1 \n" /* Enable interrupts */ | ||
791 | "ldc r1, sr \n" /* Following instruction cannot be interrupted */ | ||
792 | "bra 2f \n" /* bra and sleep are executed at once */ | ||
793 | "sleep \n" /* Execute standby */ | ||
794 | "1: \n" | ||
795 | "mov #0, r1 \n" /* Enable interrupts */ | ||
796 | "ldc r1, sr \n" | ||
797 | "2: \n" | ||
798 | : | ||
799 | : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL) | ||
800 | : "r1"); | ||
349 | } | 801 | } |
350 | 802 | ||
351 | #endif | 803 | #endif /* CONFIG_CPU == */ |
352 | 804 | ||
353 | #ifndef THREAD_CPU_INIT | 805 | /* |
354 | /* No cpu specific init - make empty */ | 806 | * End Processor-specific section |
355 | #define THREAD_CPU_INIT(core, thread) | 807 | ***************************************************************************/ |
356 | #endif | ||
357 | 808 | ||
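All three core_sleep variants above encode the same race-free idle sequence: mask interrupts, re-test the waking list, and halt only if it is still empty, with the halt re-enabling interrupts atomically so a wakeup posted in between cannot be lost. A minimal C sketch of that sequence, with stub functions standing in for the hardware primitives (mask_interrupts and friends are illustrative names, not thread.c API):

    #include <stddef.h>
    #include <stdio.h>

    struct thread_entry;

    /* Stubs standing in for the CPU-specific operations */
    static void mask_interrupts(void)   { puts("mask irq"); }
    static void unmask_interrupts(void) { puts("unmask irq"); }
    static void halt_unmasking(void)    { puts("halt; unmask atomically"); }

    static void core_sleep_sketch(struct thread_entry **waking)
    {
        mask_interrupts();                /* close the race window */
        if (*waking == NULL)
            halt_unmasking();             /* still idle: sleep until IRQ */
        else
            unmask_interrupts();          /* work arrived: skip the halt */
    }

    int main(void)
    {
        struct thread_entry *waking = NULL;
        core_sleep_sketch(&waking);       /* empty list: takes the halt path */
        return 0;
    }

The essential property is that the test and the halt both happen with interrupts masked; each port closes the remaining gap with its own trick (the tpf.l skip on ColdFire, the bra/sleep pairing on SH, the IDLE-bit handling on the ARM variant).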
358 | #if THREAD_EXTRA_CHECKS | 809 | #if THREAD_EXTRA_CHECKS |
359 | static void thread_panicf(const char *msg, struct thread_entry *thread) | 810 | static void thread_panicf(const char *msg, struct thread_entry *thread) |
@@ -387,462 +838,1030 @@ static void thread_stkov(struct thread_entry *thread) | |||
387 | #define THREAD_ASSERT(exp, msg, thread) | 838 | #define THREAD_ASSERT(exp, msg, thread) |
388 | #endif /* THREAD_EXTRA_CHECKS */ | 839 | #endif /* THREAD_EXTRA_CHECKS */ |
389 | 840 | ||
390 | static void add_to_list(struct thread_entry **list, struct thread_entry *thread) | 841 | /*--------------------------------------------------------------------------- |
842 | * Lock a list pointer and return its value | ||
843 | *--------------------------------------------------------------------------- | ||
844 | */ | ||
845 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
846 | /* Separate locking function versions */ | ||
847 | |||
848 | /* Thread locking */ | ||
849 | #define GET_THREAD_STATE(thread) \ | ||
850 | ({ corelock_lock(&(thread)->cl); (thread)->state; }) | ||
851 | #define TRY_GET_THREAD_STATE(thread) \ | ||
852 | ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; }) | ||
853 | #define UNLOCK_THREAD(thread, state) \ | ||
854 | ({ corelock_unlock(&(thread)->cl); }) | ||
855 | #define UNLOCK_THREAD_SET_STATE(thread, _state) \ | ||
856 | ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); }) | ||
857 | |||
858 | /* List locking */ | ||
859 | #define LOCK_LIST(tqp) \ | ||
860 | ({ corelock_lock(&(tqp)->cl); (tqp)->queue; }) | ||
861 | #define UNLOCK_LIST(tqp, mod) \ | ||
862 | ({ corelock_unlock(&(tqp)->cl); }) | ||
863 | #define UNLOCK_LIST_SET_PTR(tqp, mod) \ | ||
864 | ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); }) | ||
865 | |||
866 | /* Select the queue pointer directly */ | ||
867 | #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \ | ||
868 | ({ add_to_list_l(&(tqp)->queue, (thread)); }) | ||
869 | #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \ | ||
870 | ({ remove_from_list_l(&(tqp)->queue, (thread)); }) | ||
871 | |||
872 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
873 | /* Native swap/exchange versions */ | ||
874 | |||
875 | /* Thread locking */ | ||
876 | #define GET_THREAD_STATE(thread) \ | ||
877 | ({ unsigned _s; \ | ||
878 | while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \ | ||
879 | _s; }) | ||
880 | #define TRY_GET_THREAD_STATE(thread) \ | ||
881 | ({ xchg8(&(thread)->state, STATE_BUSY); }) | ||
882 | #define UNLOCK_THREAD(thread, _state) \ | ||
883 | ({ (thread)->state = (_state); }) | ||
884 | #define UNLOCK_THREAD_SET_STATE(thread, _state) \ | ||
885 | ({ (thread)->state = (_state); }) | ||
886 | |||
887 | /* List locking */ | ||
888 | #define LOCK_LIST(tqp) \ | ||
889 | ({ struct thread_entry *_l; \ | ||
890 | while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \ | ||
891 | _l; }) | ||
892 | #define UNLOCK_LIST(tqp, mod) \ | ||
893 | ({ (tqp)->queue = (mod); }) | ||
894 | #define UNLOCK_LIST_SET_PTR(tqp, mod) \ | ||
895 | ({ (tqp)->queue = (mod); }) | ||
896 | |||
897 | /* Select the local queue pointer copy returned from LOCK_LIST */ | ||
898 | #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \ | ||
899 | ({ add_to_list_l(&(tc), (thread)); }) | ||
900 | #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \ | ||
901 | ({ remove_from_list_l(&(tc), (thread)); }) | ||
902 | |||
903 | #else | ||
904 | /* Single-core/non-locked versions */ | ||
905 | |||
906 | /* Threads */ | ||
907 | #define GET_THREAD_STATE(thread) \ | ||
908 | ({ (thread)->state; }) | ||
909 | #define UNLOCK_THREAD(thread, _state) | ||
910 | #define UNLOCK_THREAD_SET_STATE(thread, _state) \ | ||
911 | ({ (thread)->state = (_state); }) | ||
912 | |||
913 | /* Lists */ | ||
914 | #define LOCK_LIST(tqp) \ | ||
915 | ({ (tqp)->queue; }) | ||
916 | #define UNLOCK_LIST(tqp, mod) | ||
917 | #define UNLOCK_LIST_SET_PTR(tqp, mod) \ | ||
918 | ({ (tqp)->queue = (mod); }) | ||
919 | |||
920 | /* Select the queue pointer directly */ | ||
921 | #define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \ | ||
922 | ({ add_to_list_l(&(tqp)->queue, (thread)); }) | ||
923 | #define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \ | ||
924 | ({ remove_from_list_l(&(tqp)->queue, (thread)); }) | ||
925 | |||
926 | #endif /* locking selection */ | ||
927 | |||
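Of the three flavors above, CORELOCK_SWAP is the least obvious: there is no separate lock word. STATE_BUSY doubles as the lock sentinel, and whoever swaps out a real state value owns the slot until a state is written back. A runnable sketch of that idea with C11 atomics standing in for the native xchg8 (slot_state and the helper names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define STATE_BUSY 0xff

    static _Atomic unsigned char slot_state = 0;   /* some real state value */

    static unsigned char get_state_locked(void)
    {
        unsigned char s;
        /* Spin while the swapped-out value is BUSY: only the caller that
         * reads back a real state owns the slot. */
        while ((s = atomic_exchange(&slot_state, STATE_BUSY)) == STATE_BUSY)
            ;
        return s;
    }

    static void unlock_state(unsigned char s)
    {
        atomic_store(&slot_state, s);   /* writing a state releases the slot */
    }

    int main(void)
    {
        unsigned char s = get_state_locked();
        printf("state was %u; slot now busy\n", s);
        unlock_state(s);
        return 0;
    }

This is also why UNLOCK_THREAD in that flavor is a plain state store: the store itself is the release.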
928 | #if THREAD_EXTRA_CHECKS | ||
929 | /*--------------------------------------------------------------------------- | ||
930 | * Lock the thread slot to obtain the state and then unlock it. Waits for | ||
931 | * it not to be busy. Used for debugging. | ||
932 | *--------------------------------------------------------------------------- | ||
933 | */ | ||
934 | static unsigned peek_thread_state(struct thread_entry *thread) | ||
935 | { | ||
936 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
937 | unsigned state = GET_THREAD_STATE(thread); | ||
938 | UNLOCK_THREAD(thread, state); | ||
939 | set_irq_level(oldlevel); | ||
940 | return state; | ||
941 | } | ||
942 | #endif /* THREAD_EXTRA_CHECKS */ | ||
943 | |||
944 | /*--------------------------------------------------------------------------- | ||
945 | * Adds a thread to a list of threads using "insert last". Uses the "l" | ||
946 | * links. | ||
947 | *--------------------------------------------------------------------------- | ||
948 | */ | ||
949 | static void add_to_list_l(struct thread_entry **list, | ||
950 | struct thread_entry *thread) | ||
391 | { | 951 | { |
392 | if (*list == NULL) | 952 | struct thread_entry *l = *list; |
953 | |||
954 | if (l == NULL) | ||
393 | { | 955 | { |
394 | thread->next = thread; | 956 | /* Insert into unoccupied list */ |
395 | thread->prev = thread; | 957 | thread->l.next = thread; |
958 | thread->l.prev = thread; | ||
396 | *list = thread; | 959 | *list = thread; |
960 | return; | ||
397 | } | 961 | } |
398 | else | 962 | |
399 | { | 963 | /* Insert last */ |
400 | /* Insert last */ | 964 | thread->l.next = l; |
401 | thread->next = *list; | 965 | thread->l.prev = l->l.prev; |
402 | thread->prev = (*list)->prev; | 966 | thread->l.prev->l.next = thread; |
403 | thread->prev->next = thread; | 967 | l->l.prev = thread; |
404 | (*list)->prev = thread; | 968 | |
405 | 969 | /* Insert next | |
406 | /* Insert next | 970 | thread->l.next = l->l.next; |
407 | thread->next = (*list)->next; | 971 | thread->l.prev = l; |
408 | thread->prev = *list; | 972 | thread->l.next->l.prev = thread; |
409 | thread->next->prev = thread; | 973 | l->l.next = thread; |
410 | (*list)->next = thread; | 974 | */ |
411 | */ | 975 | } |
412 | } | 976 | |
977 | /*--------------------------------------------------------------------------- | ||
978 | * Locks a list, adds the thread entry and unlocks the list on multicore. | ||
979 | * Defined as add_to_list_l on single-core. | ||
980 | *--------------------------------------------------------------------------- | ||
981 | */ | ||
982 | #if NUM_CORES > 1 | ||
983 | static void add_to_list_l_locked(struct thread_queue *tq, | ||
984 | struct thread_entry *thread) | ||
985 | { | ||
986 | struct thread_entry *t = LOCK_LIST(tq); | ||
987 | ADD_TO_LIST_L_SELECT(t, tq, thread); | ||
988 | UNLOCK_LIST(tq, t); | ||
989 | (void)t; | ||
413 | } | 990 | } |
991 | #else | ||
992 | #define add_to_list_l_locked(tq, thread) \ | ||
993 | add_to_list_l(&(tq)->queue, (thread)) | ||
994 | #endif | ||
414 | 995 | ||
415 | static void remove_from_list(struct thread_entry **list, | 996 | /*--------------------------------------------------------------------------- |
416 | struct thread_entry *thread) | 997 | * Removes a thread from a list of threads. Uses the "l" links. |
998 | *--------------------------------------------------------------------------- | ||
999 | */ | ||
1000 | static void remove_from_list_l(struct thread_entry **list, | ||
1001 | struct thread_entry *thread) | ||
417 | { | 1002 | { |
418 | if (list != NULL) | 1003 | struct thread_entry *prev, *next; |
1004 | |||
1005 | next = thread->l.next; | ||
1006 | |||
1007 | if (thread == next) | ||
419 | { | 1008 | { |
420 | if (thread == thread->next) | 1009 | /* The only item */ |
421 | { | 1010 | *list = NULL; |
422 | *list = NULL; | 1011 | return; |
423 | return; | 1012 | } |
424 | } | 1013 | |
425 | 1014 | if (thread == *list) | |
426 | if (thread == *list) | 1015 | { |
427 | *list = thread->next; | 1016 | /* List becomes next item */ |
1017 | *list = next; | ||
428 | } | 1018 | } |
1019 | |||
1020 | prev = thread->l.prev; | ||
429 | 1021 | ||
430 | /* Fix links to jump over the removed entry. */ | 1022 | /* Fix links to jump over the removed entry. */ |
431 | thread->prev->next = thread->next; | 1023 | prev->l.next = next; |
432 | thread->next->prev = thread->prev; | 1024 | next->l.prev = prev; |
433 | } | 1025 | } |
434 | 1026 | ||
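add_to_list_l and remove_from_list_l above work on a circular doubly-linked list whose head pointer names an arbitrary entry, so "insert last" is simply "insert just before the head". A standalone toy of both operations (struct node and the function names are illustrative, not thread.c types):

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next, *prev; int id; };

    static void insert_last(struct node **list, struct node *n)
    {
        struct node *l = *list;

        if (l == NULL) {              /* empty list: node circles to itself */
            n->next = n->prev = n;
            *list = n;
            return;
        }
        n->next = l;                  /* link in just before the head, */
        n->prev = l->prev;            /* i.e. at the tail of the circle */
        n->prev->next = n;
        l->prev = n;
    }

    static void remove_node(struct node **list, struct node *n)
    {
        struct node *next = n->next;

        if (n == next) {              /* only element: list becomes empty */
            *list = NULL;
            return;
        }
        if (n == *list)               /* removing the head: head moves on */
            *list = next;

        n->prev->next = next;         /* bridge over the removed node */
        next->prev = n->prev;
    }

    int main(void)
    {
        struct node a = {0, 0, 1}, b = {0, 0, 2}, c = {0, 0, 3};
        struct node *list = NULL;

        insert_last(&list, &a);
        insert_last(&list, &b);
        insert_last(&list, &c);
        remove_node(&list, &b);

        struct node *p = list;
        do { printf("%d ", p->id); p = p->next; } while (p != list);
        printf("\n");                 /* prints: 1 3 */
        return 0;
    }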
435 | static void check_sleepers(void) __attribute__ ((noinline)); | 1027 | /*--------------------------------------------------------------------------- |
436 | static void check_sleepers(void) | 1028 | * Locks a list, removes the thread entry and unlocks the list on multicore. |
1029 | * Defined as remove_from_list_l on single-core. | ||
1030 | *--------------------------------------------------------------------------- | ||
1031 | */ | ||
1032 | #if NUM_CORES > 1 | ||
1033 | static void remove_from_list_l_locked(struct thread_queue *tq, | ||
1034 | struct thread_entry *thread) | ||
437 | { | 1035 | { |
438 | const unsigned int core = CURRENT_CORE; | 1036 | struct thread_entry *t = LOCK_LIST(tq); |
439 | struct thread_entry *current, *next; | 1037 | REMOVE_FROM_LIST_L_SELECT(t, tq, thread); |
440 | 1038 | UNLOCK_LIST(tq, t); | |
441 | /* Check sleeping threads. */ | 1039 | (void)t; |
442 | current = cores[core].sleeping; | 1040 | } |
443 | 1041 | #else | |
444 | for (;;) | 1042 | #define remove_from_list_l_locked(tq, thread) \ |
1043 | remove_from_list_l(&(tq)->queue, (thread)) | ||
1044 | #endif | ||
1045 | |||
1046 | /*--------------------------------------------------------------------------- | ||
1047 | * Add a thread to the core's timeout list by linking the pointers in its | ||
1048 | * tmo structure. | ||
1049 | *--------------------------------------------------------------------------- | ||
1050 | */ | ||
1051 | static void add_to_list_tmo(struct thread_entry *thread) | ||
1052 | { | ||
1053 | /* Insert first */ | ||
1054 | struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout; | ||
1055 | |||
1056 | thread->tmo.prev = thread; | ||
1057 | thread->tmo.next = t; | ||
1058 | |||
1059 | if (t != NULL) | ||
1060 | { | ||
1061 | /* Fix second item's prev pointer to point to this thread */ | ||
1062 | t->tmo.prev = thread; | ||
1063 | } | ||
1064 | |||
1065 | cores[IF_COP_CORE(thread->core)].timeout = thread; | ||
1066 | } | ||
1067 | |||
1068 | /*--------------------------------------------------------------------------- | ||
1069 | * Remove a thread from the core's timeout list by unlinking the pointers in | ||
1070 | * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout | ||
1071 | * is cancelled. | ||
1072 | *--------------------------------------------------------------------------- | ||
1073 | */ | ||
1074 | static void remove_from_list_tmo(struct thread_entry *thread) | ||
1075 | { | ||
1076 | struct thread_entry *next = thread->tmo.next; | ||
1077 | struct thread_entry *prev; | ||
1078 | |||
1079 | if (thread == cores[IF_COP_CORE(thread->core)].timeout) | ||
445 | { | 1080 | { |
446 | next = current->next; | 1081 | /* Next item becomes list head */ |
447 | 1082 | cores[IF_COP_CORE(thread->core)].timeout = next; | |
448 | if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg)) | 1083 | |
1084 | if (next != NULL) | ||
449 | { | 1085 | { |
450 | /* Sleep timeout has been reached so bring the thread | 1086 | /* Fix new list head's prev to point to itself. */ |
451 | * back to life again. */ | 1087 | next->tmo.prev = next; |
452 | remove_from_list(&cores[core].sleeping, current); | ||
453 | add_to_list(&cores[core].running, current); | ||
454 | current->statearg = 0; | ||
455 | |||
456 | /* If there is no more processes in the list, break the loop. */ | ||
457 | if (cores[core].sleeping == NULL) | ||
458 | break; | ||
459 | |||
460 | current = next; | ||
461 | continue; | ||
462 | } | 1088 | } |
463 | 1089 | ||
464 | current = next; | 1090 | thread->tmo.prev = NULL; |
465 | 1091 | return; | |
466 | /* Break the loop once we have walked through the list of all | 1092 | } |
467 | * sleeping processes. */ | 1093 | |
468 | if (current == cores[core].sleeping) | 1094 | prev = thread->tmo.prev; |
469 | break; | 1095 | |
1096 | if (next != NULL) | ||
1097 | { | ||
1098 | next->tmo.prev = prev; | ||
470 | } | 1099 | } |
1100 | |||
1101 | prev->tmo.next = next; | ||
1102 | thread->tmo.prev = NULL; | ||
471 | } | 1103 | } |
472 | 1104 | ||
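The timeout list above relies on two conventions: tmo.prev == NULL means "not on the list", and the head's prev points at itself, so membership can be tested without any extra flag. A runnable toy under the same conventions (tnode and the helper names are illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct tnode { struct tnode *next, *prev; };

    static struct tnode *tmo_head = NULL;

    static void tmo_add(struct tnode *t)     /* insert first */
    {
        t->prev = t;                         /* self-link marks membership */
        t->next = tmo_head;
        if (tmo_head != NULL)
            tmo_head->prev = t;
        tmo_head = t;
    }

    static void tmo_remove(struct tnode *t)
    {
        struct tnode *next = t->next;

        if (t == tmo_head) {
            tmo_head = next;
            if (next != NULL)
                next->prev = next;           /* new head self-links */
        } else {
            struct tnode *prev = t->prev;
            if (next != NULL)
                next->prev = prev;
            prev->next = next;
        }
        t->prev = NULL;                      /* mark "not on the list" */
    }

    int main(void)
    {
        struct tnode a = {0, 0}, b = {0, 0};
        tmo_add(&a);
        tmo_add(&b);
        assert(tmo_head == &b && b.prev == &b);    /* head self-links */
        tmo_remove(&b);
        assert(tmo_head == &a && b.prev == NULL);  /* off-list marker */
        tmo_remove(&a);
        assert(tmo_head == NULL);
        return 0;
    }

This is what lets threads that time out and re-block simply stay on the list: tmo.prev tells the blocking path whether a fresh add is needed.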
473 | /* Safely finish waking all threads potentially woken by interrupts - | 1105 | /*--------------------------------------------------------------------------- |
474 | * statearg already zeroed in wakeup_thread. */ | 1106 | * Schedules a thread wakeup on the specified core. Threads will be made |
475 | static void wake_list_awaken(void) __attribute__ ((noinline)); | 1107 | * ready to run when the next task switch occurs. This introduces no |
476 | static void wake_list_awaken(void) | 1108 | * on-core delay, since the woken thread could not have run before the |
1109 | * next task switch anyway. Other cores and on-core interrupts may only | ||
1110 | * ever add to the list. | ||
1111 | *--------------------------------------------------------------------------- | ||
1112 | */ | ||
1113 | static void core_schedule_wakeup(struct thread_entry *thread) | ||
477 | { | 1114 | { |
478 | const unsigned int core = CURRENT_CORE; | ||
479 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | 1115 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
1116 | const unsigned int core = IF_COP_CORE(thread->core); | ||
1117 | add_to_list_l_locked(&cores[core].waking, thread); | ||
1118 | #if NUM_CORES > 1 | ||
1119 | if (core != CURRENT_CORE) | ||
1120 | { | ||
1121 | core_wake(core); | ||
1122 | } | ||
1123 | #endif | ||
1124 | set_irq_level(oldlevel); | ||
1125 | } | ||
480 | 1126 | ||
481 | /* No need for another check in the IRQ lock since IRQs are allowed | 1127 | /*--------------------------------------------------------------------------- |
482 | only to add threads to the waking list. They won't be adding more | 1128 | * If the waking list was populated, move all threads on it onto the running |
483 | until we're done here though. */ | 1129 | * list so they may be run ASAP. |
484 | 1130 | *--------------------------------------------------------------------------- | |
485 | struct thread_entry *waking = cores[core].waking; | 1131 | */ |
486 | struct thread_entry *running = cores[core].running; | 1132 | static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core)) |
1133 | { | ||
1134 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
1135 | struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking); | ||
1136 | struct thread_entry *r = cores[IF_COP_CORE(core)].running; | ||
487 | 1137 | ||
488 | if (running != NULL) | 1138 | /* Transfer all threads on waking list to running list in one |
1139 | swoop */ | ||
1140 | if (r != NULL) | ||
489 | { | 1141 | { |
490 | /* Place waking threads at the end of the running list. */ | 1142 | /* Place waking threads at the end of the running list. */ |
491 | struct thread_entry *tmp; | 1143 | struct thread_entry *tmp; |
492 | waking->prev->next = running; | 1144 | w->l.prev->l.next = r; |
493 | running->prev->next = waking; | 1145 | r->l.prev->l.next = w; |
494 | tmp = running->prev; | 1146 | tmp = r->l.prev; |
495 | running->prev = waking->prev; | 1147 | r->l.prev = w->l.prev; |
496 | waking->prev = tmp; | 1148 | w->l.prev = tmp; |
497 | } | 1149 | } |
498 | else | 1150 | else |
499 | { | 1151 | { |
500 | /* Just transfer the list as-is - just came out of a core | 1152 | /* Just transfer the list as-is */ |
501 | * sleep. */ | 1153 | cores[IF_COP_CORE(core)].running = w; |
502 | cores[core].running = waking; | ||
503 | } | 1154 | } |
1155 | /* Just leave any timeout threads on the timeout list. If a timeout check | ||
1156 | * is due, they will be removed there. If they do a timeout again before | ||
1157 | * being removed, they will just stay on the list with a new expiration | ||
1158 | * tick. */ | ||
504 | 1159 | ||
505 | /* Done with waking list */ | 1160 | /* Waking list is clear - NULL and unlock it */ |
506 | cores[core].waking = NULL; | 1161 | UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL); |
507 | set_irq_level(oldlevel); | 1162 | set_irq_level(oldlevel); |
508 | } | 1163 | } |
509 | 1164 | ||
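The pointer shuffle in core_perform_wakeup is a constant-time splice of two circular doubly-linked lists: the whole waking circle is stitched in at the tail of the running circle no matter how many threads woke. The same five assignments in standalone form (illustrative node type):

    #include <stdio.h>

    struct node { struct node *next, *prev; int id; };

    /* Append the w (waking) circle at the tail of the r (running) circle */
    static void splice(struct node *r, struct node *w)
    {
        struct node *tmp;
        w->prev->next = r;     /* waking tail wraps around to running head */
        r->prev->next = w;     /* running tail feeds into waking head */
        tmp = r->prev;
        r->prev = w->prev;     /* running head's prev is the new tail */
        w->prev = tmp;         /* waking head's prev is old running tail */
    }

    int main(void)
    {
        struct node r1 = {0,0,1}, r2 = {0,0,2}, w1 = {0,0,3}, w2 = {0,0,4};
        r1.next = r1.prev = &r2;  r2.next = r2.prev = &r1;  /* running: 1 2 */
        w1.next = w1.prev = &w2;  w2.next = w2.prev = &w1;  /* waking:  3 4 */

        splice(&r1, &w1);

        struct node *p = &r1;
        do { printf("%d ", p->id); p = p->next; } while (p != &r1);
        printf("\n");          /* prints: 1 2 3 4 */
        return 0;
    }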
510 | static inline void sleep_core(void) | 1165 | /*--------------------------------------------------------------------------- |
1166 | * Check the core's timeout list when at least one thread is due to wake. | ||
1167 | * Filtering for the condition is done before making the call. Resets the | ||
1168 | * tick at which the next check will occur. | ||
1169 | *--------------------------------------------------------------------------- | ||
1170 | */ | ||
1171 | static void check_tmo_threads(void) | ||
511 | { | 1172 | { |
512 | const unsigned int core = CURRENT_CORE; | 1173 | const unsigned int core = CURRENT_CORE; |
1174 | const long tick = current_tick; /* snapshot the current tick */ | ||
1175 | long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */ | ||
1176 | struct thread_entry *next = cores[core].timeout; | ||
513 | 1177 | ||
514 | for (;;) | 1178 | /* If there are no processes waiting for a timeout, just keep the check |
1179 | tick from falling into the past. */ | ||
1180 | if (next != NULL) | ||
515 | { | 1181 | { |
516 | /* We want to do these ASAP as it may change the decision to sleep | 1182 | /* Check sleeping threads. */ |
517 | the core or the core has woken because an interrupt occurred | 1183 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
518 | and posted a message to a queue. */ | ||
519 | if (cores[core].waking != NULL) | ||
520 | wake_list_awaken(); | ||
521 | 1184 | ||
522 | if (cores[core].last_tick != current_tick) | 1185 | do |
523 | { | 1186 | { |
524 | if (cores[core].sleeping != NULL) | 1187 | /* Must make sure no one else is examining the state, wait until |
525 | check_sleepers(); | 1188 | slot is no longer busy */ |
526 | cores[core].last_tick = current_tick; | 1189 | struct thread_entry *curr = next; |
1190 | next = curr->tmo.next; | ||
1191 | |||
1192 | unsigned state = GET_THREAD_STATE(curr); | ||
1193 | |||
1194 | if (state < TIMEOUT_STATE_FIRST) | ||
1195 | { | ||
1196 | /* Cleanup threads no longer on a timeout but still on the | ||
1197 | * list. */ | ||
1198 | remove_from_list_tmo(curr); | ||
1199 | UNLOCK_THREAD(curr, state); /* Unlock thread slot */ | ||
1200 | } | ||
1201 | else if (TIME_BEFORE(tick, curr->tmo_tick)) | ||
1202 | { | ||
1203 | /* Timeout still pending - this will be the usual case */ | ||
1204 | if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) | ||
1205 | { | ||
1206 | /* Earliest timeout found so far - move the next check up | ||
1207 | to its time */ | ||
1208 | next_tmo_check = curr->tmo_tick; | ||
1209 | } | ||
1210 | UNLOCK_THREAD(curr, state); /* Unlock thread slot */ | ||
1211 | } | ||
1212 | else | ||
1213 | { | ||
1214 | /* Sleep timeout has been reached so bring the thread back to | ||
1215 | * life again. */ | ||
1216 | if (state == STATE_BLOCKED_W_TMO) | ||
1217 | { | ||
1218 | remove_from_list_l_locked(curr->bqp, curr); | ||
1219 | } | ||
1220 | |||
1221 | remove_from_list_tmo(curr); | ||
1222 | add_to_list_l(&cores[core].running, curr); | ||
1223 | UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING); | ||
1224 | } | ||
1225 | |||
1226 | /* Break the loop once we have walked through the list of all | ||
1227 | * sleeping processes or have removed them all. */ | ||
527 | } | 1228 | } |
528 | 1229 | while (next != NULL); | |
529 | /* We must sleep until there is at least one process in the list | ||
530 | * of running processes. */ | ||
531 | if (cores[core].running != NULL) | ||
532 | break; | ||
533 | 1230 | ||
534 | /* Enter sleep mode to reduce power usage, woken up on interrupt */ | 1231 | set_irq_level(oldlevel); |
535 | core_sleep(); | ||
536 | } | 1232 | } |
1233 | |||
1234 | cores[core].next_tmo_check = next_tmo_check; | ||
537 | } | 1235 | } |
538 | 1236 | ||
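All the tick math in check_tmo_threads goes through TIME_BEFORE so that timeouts survive wraparound of current_tick. A sketch of why the signed-difference comparison matters; the macros are assumed to match the usual Rockbox kernel.h definitions, recast here for explicit 32-bit ticks so the demo behaves identically on 64-bit hosts:

    #include <stdint.h>
    #include <stdio.h>

    #define TIME_AFTER(a, b)  ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
    #define TIME_BEFORE(a, b) TIME_AFTER(b, a)

    int main(void)
    {
        uint32_t tick = 0xfffffff0u;       /* counter about to wrap */
        uint32_t tmo_tick = tick + 0x20u;  /* wraps to 0x00000010 */

        /* A plain '<' claims the timeout already expired; the signed
         * difference correctly sees it as still pending. */
        printf("plain '<'   says pending: %d\n", tick < tmo_tick);
        printf("TIME_BEFORE says pending: %d\n", TIME_BEFORE(tick, tmo_tick));
        return 0;
    }

The 60*HZ floor on next_tmo_check above exists for the same arithmetic: it keeps the next check tick from falling into the past when no timeouts are pending.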
539 | #ifdef RB_PROFILE | 1237 | /*--------------------------------------------------------------------------- |
540 | static int get_threadnum(struct thread_entry *thread) | 1238 | * Performs operations that must be done before blocking a thread but after |
1239 | * the state is saved - follows reverse of locking order. blk_ops.flags is | ||
1240 | * assumed to be nonzero. | ||
1241 | *--------------------------------------------------------------------------- | ||
1242 | */ | ||
1243 | static inline void run_blocking_ops( | ||
1244 | IF_COP_VOID(unsigned int core, struct thread_entry *thread)) | ||
541 | { | 1245 | { |
542 | int i; | 1246 | #if NUM_CORES > 1 |
543 | 1247 | struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops; | |
544 | for (i = 0; i < MAXTHREADS; i++) | 1248 | const unsigned flags = ops->flags; |
1249 | |||
1250 | if (flags == 0) | ||
1251 | return; | ||
1252 | |||
1253 | if (flags & TBOP_SWITCH_CORE) | ||
545 | { | 1254 | { |
546 | if (&threads[i] == thread) | 1255 | core_switch_blk_op(core, thread); |
547 | return i; | ||
548 | } | 1256 | } |
549 | 1257 | ||
550 | return -1; | 1258 | #if CONFIG_CORELOCK == SW_CORELOCK |
1259 | if (flags & TBOP_UNLOCK_LIST) | ||
1260 | { | ||
1261 | UNLOCK_LIST(ops->list_p, NULL); | ||
1262 | } | ||
1263 | |||
1264 | if (flags & TBOP_UNLOCK_CORELOCK) | ||
1265 | { | ||
1266 | corelock_unlock(ops->cl_p); | ||
1267 | } | ||
1268 | |||
1269 | if (flags & TBOP_UNLOCK_THREAD) | ||
1270 | { | ||
1271 | UNLOCK_THREAD(ops->thread, 0); | ||
1272 | } | ||
1273 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1274 | /* Write updated variable value into memory location */ | ||
1275 | switch (flags & TBOP_VAR_TYPE_MASK) | ||
1276 | { | ||
1277 | case TBOP_UNLOCK_LIST: | ||
1278 | UNLOCK_LIST(ops->list_p, ops->list_v); | ||
1279 | break; | ||
1280 | case TBOP_SET_VARi: | ||
1281 | *ops->var_ip = ops->var_iv; | ||
1282 | break; | ||
1283 | case TBOP_SET_VARu8: | ||
1284 | *ops->var_u8p = ops->var_u8v; | ||
1285 | break; | ||
1286 | } | ||
1287 | #endif /* CONFIG_CORELOCK == */ | ||
1288 | |||
1289 | /* Unlock thread's slot */ | ||
1290 | if (flags & TBOP_UNLOCK_CURRENT) | ||
1291 | { | ||
1292 | UNLOCK_THREAD(thread, ops->state); | ||
1293 | } | ||
1294 | |||
1295 | /* Reset the IRQ level */ | ||
1296 | if (flags & TBOP_IRQ_LEVEL) | ||
1297 | { | ||
1298 | set_irq_level(ops->irq_level); | ||
1299 | } | ||
1300 | |||
1301 | ops->flags = 0; | ||
1302 | #else | ||
1303 | int level = cores[CURRENT_CORE].irq_level; | ||
1304 | if (level == STAY_IRQ_LEVEL) | ||
1305 | return; | ||
1306 | |||
1307 | cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL; | ||
1308 | set_irq_level(level); | ||
1309 | #endif /* NUM_CORES */ | ||
551 | } | 1310 | } |
552 | 1311 | ||
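run_blocking_ops replays work that could not happen until after the outgoing thread's context was saved: each deferred operation is a flag bit plus stashed arguments, consumed in one place and in reverse locking order. A toy reduction of that flags-plus-arguments pattern (the TBOP_* values, fields, and lock representation here are illustrative only):

    #include <stdio.h>

    #define TBOP_UNLOCK_LIST 0x01
    #define TBOP_IRQ_LEVEL   0x02

    struct blk_ops {
        unsigned flags;      /* which deferred ops are pending */
        int irq_level;       /* argument for TBOP_IRQ_LEVEL */
        int *list_lock;      /* stand-in for a corelock to release */
    };

    static struct blk_ops blk_ops;

    static void run_blocking_ops_toy(void)
    {
        unsigned flags = blk_ops.flags;

        if (flags == 0)
            return;                          /* nothing was deferred */

        if (flags & TBOP_UNLOCK_LIST)
            *blk_ops.list_lock = 0;          /* release the deferred lock */

        if (flags & TBOP_IRQ_LEVEL)
            printf("restore irq level %d\n", blk_ops.irq_level);

        blk_ops.flags = 0;                   /* all ops consumed */
    }

    int main(void)
    {
        int lock = 1;                        /* "held" */
        blk_ops.flags = TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
        blk_ops.list_lock = &lock;
        blk_ops.irq_level = 0;

        run_blocking_ops_toy();
        printf("lock=%d flags=%u\n", lock, blk_ops.flags); /* lock=0 flags=0 */
        return 0;
    }

The real function differs only in what it defers (core switches, corelock and slot unlocks, IRQ level) and in honoring the reverse of the order in which those locks were taken.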
553 | void profile_thread(void) { | 1312 | |
554 | profstart(get_threadnum(cores[CURRENT_CORE].running)); | 1313 | /*--------------------------------------------------------------------------- |
1314 | * Runs any operations that may cause threads to be ready to run and then | ||
1315 | * sleeps the processor core until the next interrupt if none are. | ||
1316 | *--------------------------------------------------------------------------- | ||
1317 | */ | ||
1318 | static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core)) | ||
1319 | { | ||
1320 | for (;;) | ||
1321 | { | ||
1322 | /* We want to do these ASAP as it may change the decision to sleep | ||
1323 | * the core or a core has woken because an interrupt occurred | ||
1324 | * and posted a message to a queue. */ | ||
1325 | if (cores[IF_COP_CORE(core)].waking.queue != NULL) | ||
1326 | { | ||
1327 | core_perform_wakeup(IF_COP(core)); | ||
1328 | } | ||
1329 | |||
1330 | /* If there are threads on a timeout and the earliest wakeup is due, | ||
1331 | * check the list and wake any threads that need to start running | ||
1332 | * again. */ | ||
1333 | if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check)) | ||
1334 | { | ||
1335 | check_tmo_threads(); | ||
1336 | } | ||
1337 | |||
1338 | /* If there is a ready-to-run task, return it and keep the core | ||
1339 | * awake. */ | ||
1340 | if (cores[IF_COP_CORE(core)].running != NULL) | ||
1341 | { | ||
1342 | return cores[IF_COP_CORE(core)].running; | ||
1343 | } | ||
1344 | |||
1345 | /* Enter sleep mode to reduce power usage - woken up on interrupt or | ||
1346 | * wakeup request from another core. May abort if the waking list | ||
1347 | * became populated (again). See beginning of this file for the | ||
1348 | * algorithm to atomically determine this. */ | ||
1349 | core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue); | ||
1350 | } | ||
1351 | } | ||
1352 | |||
1353 | #ifdef RB_PROFILE | ||
1354 | void profile_thread(void) | ||
1355 | { | ||
1356 | profstart(cores[CURRENT_CORE].running - threads); | ||
555 | } | 1357 | } |
556 | #endif | 1358 | #endif |
557 | 1359 | ||
558 | static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline)); | 1360 | /*--------------------------------------------------------------------------- |
559 | static void change_thread_state(struct thread_entry **blocked_list) | 1361 | * Prepares a thread to block on an object's list and/or for a specified |
1362 | * duration - expects object and slot to be appropriately locked if needed. | ||
1363 | *--------------------------------------------------------------------------- | ||
1364 | */ | ||
1365 | static inline void _block_thread_on_l(struct thread_queue *list, | ||
1366 | struct thread_entry *thread, | ||
1367 | unsigned state | ||
1368 | IF_SWCL(, const bool nolock)) | ||
560 | { | 1369 | { |
561 | const unsigned int core = CURRENT_CORE; | 1370 | /* If inlined, unreachable branches will be pruned with no size penalty |
562 | struct thread_entry *old; | 1371 | because constant params are used for state and nolock. */ |
563 | unsigned long new_state; | 1372 | const unsigned int core = IF_COP_CORE(thread->core); |
564 | 1373 | ||
565 | /* Remove the thread from the list of running threads. */ | 1374 | /* Remove the thread from the list of running threads. */ |
566 | old = cores[core].running; | 1375 | remove_from_list_l(&cores[core].running, thread); |
567 | new_state = GET_STATE(old->statearg); | ||
568 | 1376 | ||
569 | /* Check if a thread state change has been requested. */ | 1377 | /* Add a timeout to the block if not infinite */ |
570 | if (new_state) | 1378 | switch (state) |
571 | { | 1379 | { |
572 | /* Change running thread state and switch to next thread. */ | 1380 | case STATE_BLOCKED: |
573 | remove_from_list(&cores[core].running, old); | 1381 | /* Put the thread into a new list of inactive threads. */ |
574 | 1382 | #if CONFIG_CORELOCK == SW_CORELOCK | |
575 | /* And put the thread into a new list of inactive threads. */ | 1383 | if (nolock) |
576 | if (new_state == STATE_BLOCKED) | 1384 | { |
577 | add_to_list(blocked_list, old); | 1385 | thread->bqp = NULL; /* Indicate nolock list */ |
1386 | thread->bqnlp = (struct thread_entry **)list; | ||
1387 | add_to_list_l((struct thread_entry **)list, thread); | ||
1388 | } | ||
1389 | else | ||
1390 | #endif | ||
1391 | { | ||
1392 | thread->bqp = list; | ||
1393 | add_to_list_l_locked(list, thread); | ||
1394 | } | ||
1395 | break; | ||
1396 | case STATE_BLOCKED_W_TMO: | ||
1397 | /* Put the thread into a new list of inactive threads. */ | ||
1398 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1399 | if (nolock) | ||
1400 | { | ||
1401 | thread->bqp = NULL; /* Indicate nolock list */ | ||
1402 | thread->bqnlp = (struct thread_entry **)list; | ||
1403 | add_to_list_l((struct thread_entry **)list, thread); | ||
1404 | } | ||
578 | else | 1405 | else |
579 | add_to_list(&cores[core].sleeping, old); | ||
580 | |||
581 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
582 | /* Reset priorities */ | ||
583 | if (old->priority == cores[core].highest_priority) | ||
584 | cores[core].highest_priority = 100; | ||
585 | #endif | 1406 | #endif |
1407 | { | ||
1408 | thread->bqp = list; | ||
1409 | add_to_list_l_locked(list, thread); | ||
1410 | } | ||
1411 | /* Fall-through */ | ||
1412 | case STATE_SLEEPING: | ||
1413 | /* If this thread times out sooner than any other thread, update | ||
1414 | next_tmo_check to its timeout */ | ||
1415 | if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check)) | ||
1416 | { | ||
1417 | cores[core].next_tmo_check = thread->tmo_tick; | ||
1418 | } | ||
1419 | |||
1420 | if (thread->tmo.prev == NULL) | ||
1421 | { | ||
1422 | add_to_list_tmo(thread); | ||
1423 | } | ||
1424 | /* else thread was never removed from list - just keep it there */ | ||
1425 | break; | ||
586 | } | 1426 | } |
587 | else | 1427 | |
588 | /* Switch to the next running thread. */ | 1428 | #ifdef HAVE_PRIORITY_SCHEDULING |
589 | cores[core].running = old->next; | 1429 | /* Reset priorities */ |
1430 | if (thread->priority == cores[core].highest_priority) | ||
1431 | cores[core].highest_priority = LOWEST_PRIORITY; | ||
1432 | #endif | ||
1433 | |||
1434 | #if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK | ||
1435 | /* Safe to set state now */ | ||
1436 | thread->state = state; | ||
1437 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
1438 | cores[core].blk_ops.state = state; | ||
1439 | #endif | ||
1440 | |||
1441 | #if NUM_CORES > 1 | ||
1442 | /* Delay slot unlock until task switch */ | ||
1443 | cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT; | ||
1444 | #endif | ||
1445 | } | ||
1446 | |||
1447 | static inline void block_thread_on_l( | ||
1448 | struct thread_queue *list, struct thread_entry *thread, unsigned state) | ||
1449 | { | ||
1450 | _block_thread_on_l(list, thread, state IF_SWCL(, false)); | ||
1451 | } | ||
1452 | |||
1453 | static inline void block_thread_on_l_no_listlock( | ||
1454 | struct thread_entry **list, struct thread_entry *thread, unsigned state) | ||
1455 | { | ||
1456 | _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true)); | ||
590 | } | 1457 | } |
591 | 1458 | ||
592 | /*--------------------------------------------------------------------------- | 1459 | /*--------------------------------------------------------------------------- |
593 | * Switch thread in round robin fashion. | 1460 | * Switch thread in round robin fashion for any given priority. Any thread |
1461 | * that removed itself from the running list first must specify itself in | ||
1462 | * the parameter. | ||
1463 | * | ||
1464 | * INTERNAL: Intended for use by kernel and not for programs. | ||
594 | *--------------------------------------------------------------------------- | 1465 | *--------------------------------------------------------------------------- |
595 | */ | 1466 | */ |
596 | void switch_thread(bool save_context, struct thread_entry **blocked_list) | 1467 | void switch_thread(struct thread_entry *old) |
597 | { | 1468 | { |
598 | const unsigned int core = CURRENT_CORE; | 1469 | const unsigned int core = CURRENT_CORE; |
1470 | struct thread_entry *thread = cores[core].running; | ||
1471 | |||
1472 | if (old == NULL) | ||
1473 | { | ||
1474 | /* Move to next thread */ | ||
1475 | old = thread; | ||
1476 | cores[core].running = old->l.next; | ||
1477 | } | ||
1478 | /* else running list is already at next thread */ | ||
599 | 1479 | ||
600 | #ifdef RB_PROFILE | 1480 | #ifdef RB_PROFILE |
601 | profile_thread_stopped(get_threadnum(cores[core].running)); | 1481 | profile_thread_stopped(old - threads); |
602 | #endif | 1482 | #endif |
603 | unsigned int *stackptr; | ||
604 | |||
605 | #ifdef SIMULATOR | ||
606 | /* Do nothing */ | ||
607 | #else | ||
608 | 1483 | ||
609 | /* Begin task switching by saving our current context so that we can | 1484 | /* Begin task switching by saving our current context so that we can |
610 | * restore the state of the current thread later to the point prior | 1485 | * restore the state of the current thread later to the point prior |
611 | * to this call. */ | 1486 | * to this call. */ |
612 | if (save_context) | 1487 | store_context(&old->context); |
613 | { | ||
614 | store_context(&cores[core].running->context); | ||
615 | 1488 | ||
616 | /* Check if the current thread stack is overflown */ | 1489 | /* Check if the current thread stack is overflown */ |
617 | stackptr = cores[core].running->stack; | 1490 | if(((unsigned int *)old->stack)[0] != DEADBEEF) |
618 | if(stackptr[0] != DEADBEEF) | 1491 | thread_stkov(old); |
619 | thread_stkov(cores[core].running); | 1492 | |
620 | 1493 | /* Run any blocking operations requested before switching/sleeping */ | |
621 | /* Rearrange thread lists as needed */ | 1494 | run_blocking_ops(IF_COP(core, old)); |
622 | change_thread_state(blocked_list); | ||
623 | 1495 | ||
624 | /* This has to be done after the scheduler is finished with the | ||
625 | blocked_list pointer so that an IRQ can't kill us by attempting | ||
626 | a wake but before attempting any core sleep. */ | ||
627 | if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL) | ||
628 | { | ||
629 | int level = cores[core].switch_to_irq_level; | ||
630 | cores[core].switch_to_irq_level = STAY_IRQ_LEVEL; | ||
631 | set_irq_level(level); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | /* Go through the list of sleeping task to check if we need to wake up | 1496 | /* Go through the list of sleeping task to check if we need to wake up |
636 | * any of them due to timeout. Also puts core into sleep state until | 1497 | * any of them due to timeout. Also puts core into sleep state until |
637 | * there is at least one running process again. */ | 1498 | * there is at least one running process again. */ |
638 | sleep_core(); | 1499 | thread = sleep_core(IF_COP(core)); |
639 | 1500 | ||
640 | #ifdef HAVE_PRIORITY_SCHEDULING | 1501 | #ifdef HAVE_PRIORITY_SCHEDULING |
641 | /* Select the new task based on priorities and the last time a process | 1502 | /* Select the new task based on priorities and the last time a process |
642 | * got CPU time. */ | 1503 | * got CPU time. */ |
643 | for (;;) | 1504 | for (;;) |
644 | { | 1505 | { |
645 | int priority = cores[core].running->priority; | 1506 | int priority = MIN(thread->priority, thread->priority_x); |
646 | 1507 | ||
647 | if (priority < cores[core].highest_priority) | 1508 | if (priority < cores[core].highest_priority) |
648 | cores[core].highest_priority = priority; | 1509 | cores[core].highest_priority = priority; |
649 | 1510 | ||
650 | if (priority == cores[core].highest_priority || | 1511 | if (priority == cores[core].highest_priority || |
651 | (current_tick - cores[core].running->last_run > | 1512 | (current_tick - thread->last_run > priority * 8)) |
652 | priority * 8) || | ||
653 | cores[core].running->priority_x != 0) | ||
654 | { | 1513 | { |
1514 | cores[core].running = thread; | ||
655 | break; | 1515 | break; |
656 | } | 1516 | } |
657 | 1517 | ||
658 | cores[core].running = cores[core].running->next; | 1518 | thread = thread->l.next; |
659 | } | 1519 | } |
660 | 1520 | ||
661 | /* Reset the value of thread's last running time to the current time. */ | 1521 | /* Reset the value of thread's last running time to the current time. */ |
662 | cores[core].running->last_run = current_tick; | 1522 | thread->last_run = current_tick; |
663 | #endif | 1523 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
664 | 1524 | ||
665 | #endif | ||
666 | |||
667 | /* And finally give control to the next thread. */ | 1525 | /* And finally give control to the next thread. */ |
668 | load_context(&cores[core].running->context); | 1526 | load_context(&thread->context); |
669 | 1527 | ||
670 | #ifdef RB_PROFILE | 1528 | #ifdef RB_PROFILE |
671 | profile_thread_started(get_threadnum(cores[core].running)); | 1529 | profile_thread_started(thread - threads); |
672 | #endif | 1530 | #endif |
673 | } | 1531 | } |
674 | 1532 | ||
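The HAVE_PRIORITY_SCHEDULING loop in switch_thread picks a thread when it holds the best (numerically lowest) priority seen so far, or when it has been starved for more than priority * 8 ticks. That selection rule in isolation (toy types and names, not the real scheduler state):

    #include <stdio.h>

    #define LOWEST_PRIORITY 100

    struct toy_thread { int priority; long last_run; };

    static int should_run(struct toy_thread *t, int *highest, long tick)
    {
        if (t->priority < *highest)
            *highest = t->priority;   /* track best priority on the list */

        return t->priority == *highest ||
               tick - t->last_run > t->priority * 8;  /* anti-starvation */
    }

    int main(void)
    {
        int highest = LOWEST_PRIORITY;
        long tick = 1000;
        struct toy_thread ui  = { 10, 999 };  /* best priority, ran recently */
        struct toy_thread bg  = { 20, 100 };  /* worse priority, starved */
        struct toy_thread mid = { 20, 995 };  /* worse priority, recent */

        printf("ui  runs: %d\n", should_run(&ui, &highest, tick));   /* 1 */
        printf("bg  runs: %d\n", should_run(&bg, &highest, tick));   /* 1 */
        printf("mid runs: %d\n", should_run(&mid, &highest, tick));  /* 0 */
        return 0;
    }

The last_run kick given in _wakeup_thread (current_tick - 8*LOWEST_PRIORITY) exploits the same test: it makes a freshly woken thread look maximally starved so it is picked promptly.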
675 | void sleep_thread(int ticks) | 1533 | /*--------------------------------------------------------------------------- |
1534 | * Removes the boost flag from a thread and unboosts the CPU if the count | ||
1535 | * of boosted threads reaches zero. Requires thread slot to be locked first. | ||
1536 | *--------------------------------------------------------------------------- | ||
1537 | */ | ||
1538 | static inline void unboost_thread(struct thread_entry *thread) | ||
676 | { | 1539 | { |
677 | struct thread_entry *current; | ||
678 | |||
679 | current = cores[CURRENT_CORE].running; | ||
680 | |||
681 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 1540 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
682 | if (STATE_IS_BOOSTED(current->statearg)) | 1541 | if (thread->boosted != 0) |
683 | { | 1542 | { |
684 | boosted_threads--; | 1543 | thread->boosted = 0; |
685 | if (!boosted_threads) | 1544 | if (--boosted_threads == 0) |
686 | { | 1545 | { |
687 | cpu_boost(false); | 1546 | cpu_boost(false); |
688 | } | 1547 | } |
689 | } | 1548 | } |
690 | #endif | 1549 | #endif |
691 | 1550 | (void)thread; | |
692 | /* Set the thread's new state and timeout and finally force a task switch | ||
693 | * so that scheduler removes thread from the list of running processes | ||
694 | * and puts it in list of sleeping tasks. */ | ||
695 | SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1); | ||
696 | |||
697 | switch_thread(true, NULL); | ||
698 | } | 1551 | } |
699 | 1552 | ||
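The boost accounting above pairs a per-thread boosted flag with the global boosted_threads counter, so cpu_boost() toggles only on the first boost and the last unboost. Both halves of that accounting in a standalone sketch (boost_thread_toy is a hypothetical counterpart; only the unboost half appears in this hunk):

    #include <stdbool.h>
    #include <stdio.h>

    static int boosted_threads;

    static void cpu_boost(bool on) { printf("cpu_boost(%d)\n", (int)on); }

    struct toy_thread { int boosted; };

    static void boost_thread_toy(struct toy_thread *t)
    {
        if (t->boosted == 0) {
            t->boosted = 1;
            if (boosted_threads++ == 0)
                cpu_boost(true);        /* first booster raises the clock */
        }
    }

    static void unboost_thread_toy(struct toy_thread *t)
    {
        if (t->boosted != 0) {
            t->boosted = 0;
            if (--boosted_threads == 0)
                cpu_boost(false);       /* last booster drops the clock */
        }
    }

    int main(void)
    {
        struct toy_thread a = {0}, b = {0};
        boost_thread_toy(&a);    /* prints cpu_boost(1) */
        boost_thread_toy(&b);    /* count becomes 2; no cpu_boost call */
        unboost_thread_toy(&a);  /* b still boosted: no call */
        unboost_thread_toy(&b);  /* prints cpu_boost(0) */
        return 0;
    }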
700 | void block_thread(struct thread_entry **list) | 1553 | /*--------------------------------------------------------------------------- |
1554 | * Sleeps a thread for a specified number of ticks and unboosts the thread | ||
1555 | * if it is boosted. If ticks is zero, it does not delay but instead switches | ||
1556 | * tasks. | ||
1557 | * | ||
1558 | * INTERNAL: Intended for use by kernel and not for programs. | ||
1559 | *--------------------------------------------------------------------------- | ||
1560 | */ | ||
1561 | void sleep_thread(int ticks) | ||
701 | { | 1562 | { |
702 | struct thread_entry *current; | ||
703 | |||
704 | /* Get the entry for the current running thread. */ | 1563 | /* Get the entry for the current running thread. */ |
705 | current = cores[CURRENT_CORE].running; | 1564 | struct thread_entry *current = cores[CURRENT_CORE].running; |
706 | 1565 | ||
707 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 1566 | #if NUM_CORES > 1 |
708 | /* Keep the boosted state over indefinite block calls, because | 1567 | /* Lock thread slot */ |
709 | * we are waiting until the earliest time that someone else | 1568 | GET_THREAD_STATE(current); |
710 | * completes an action */ | ||
711 | unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg); | ||
712 | #endif | 1569 | #endif |
713 | 1570 | ||
714 | /* We are not allowed to mix blocking types in one queue. */ | 1571 | /* Remove our boosted status if any */ |
715 | THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO, | 1572 | unboost_thread(current); |
716 | "Blocking violation B->*T", current); | 1573 | |
717 | 1574 | /* Set our timeout, change lists, and finally switch threads. | |
1575 | * Unlock during switch on multicore. */ | ||
1576 | current->tmo_tick = current_tick + ticks + 1; | ||
1577 | block_thread_on_l(NULL, current, STATE_SLEEPING); | ||
1578 | switch_thread(current); | ||
1579 | |||
1580 | /* Our status should be STATE_RUNNING */ | ||
1581 | THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING, | ||
1582 | "S:R->!*R", current); | ||
1583 | } | ||
1584 | |||
1585 | /*--------------------------------------------------------------------------- | ||
1586 | * Indefinitely block a thread on a blocking queue for explicit wakeup. | ||
1587 | * Caller with interrupt-accessible lists should disable interrupts first | ||
1588 | * and request a TBOP_IRQ_LEVEL blocking operation to reset it. | ||
1589 | * | ||
1590 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
1591 | *--------------------------------------------------------------------------- | ||
1592 | */ | ||
1593 | IF_SWCL(static inline) void _block_thread(struct thread_queue *list | ||
1594 | IF_SWCL(, const bool nolock)) | ||
1595 | { | ||
1596 | /* Get the entry for the current running thread. */ | ||
1597 | struct thread_entry *current = cores[CURRENT_CORE].running; | ||
1598 | |||
718 | /* Set the state to blocked and ask the scheduler to switch tasks, | 1599 | /* Set the state to blocked and ask the scheduler to switch tasks, |
719 | * this takes us off of the run queue until we are explicitly woken */ | 1600 | * this takes us off of the run queue until we are explicitly woken */ |
720 | SET_STATE(current->statearg, STATE_BLOCKED, 0); | ||
721 | 1601 | ||
722 | switch_thread(true, list); | 1602 | #if NUM_CORES > 1 |
1603 | /* Lock thread slot */ | ||
1604 | GET_THREAD_STATE(current); | ||
1605 | #endif | ||
723 | 1606 | ||
724 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 1607 | #if CONFIG_CORELOCK == SW_CORELOCK |
725 | /* Reset only the boosted flag to indicate we are up and running again. */ | 1608 | /* One branch optimized away during inlining */ |
726 | current->statearg = boost_flag; | 1609 | if (nolock) |
727 | #else | 1610 | { |
728 | /* Clear all flags to indicate we are up and running again. */ | 1611 | block_thread_on_l_no_listlock((struct thread_entry **)list, |
729 | current->statearg = 0; | 1612 | current, STATE_BLOCKED); |
1613 | } | ||
1614 | else | ||
730 | #endif | 1615 | #endif |
1616 | { | ||
1617 | block_thread_on_l(list, current, STATE_BLOCKED); | ||
1618 | } | ||
1619 | |||
1620 | switch_thread(current); | ||
1621 | |||
1622 | /* Our status should be STATE_RUNNING */ | ||
1623 | THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING, | ||
1624 | "B:R->!*R", current); | ||
1625 | } | ||
1626 | |||
1627 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1628 | /* Inline lock/nolock version of _block_thread into these functions */ | ||
1629 | void block_thread(struct thread_queue *tq) | ||
1630 | { | ||
1631 | _block_thread(tq, false); | ||
731 | } | 1632 | } |
732 | 1633 | ||
733 | void block_thread_w_tmo(struct thread_entry **list, int timeout) | 1634 | void block_thread_no_listlock(struct thread_entry **list) |
1635 | { | ||
1636 | _block_thread((struct thread_queue *)list, true); | ||
1637 | } | ||
1638 | #endif /* CONFIG_CORELOCK */ | ||
1639 | |||
1640 | /*--------------------------------------------------------------------------- | ||
1641 | * Block a thread on a blocking queue for a specified time interval or until | ||
1642 | * explicitly woken - whichever happens first. | ||
1643 | * Caller with interrupt-accessible lists should disable interrupts first | ||
1644 | * and request that interrupt level be restored after switching out the | ||
1645 | * current thread. | ||
1646 | * | ||
1647 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
1648 | *--------------------------------------------------------------------------- | ||
1649 | */ | ||
1650 | void block_thread_w_tmo(struct thread_queue *list, int timeout) | ||
734 | { | 1651 | { |
735 | struct thread_entry *current; | ||
736 | /* Get the entry for the current running thread. */ | 1652 | /* Get the entry for the current running thread. */ |
737 | current = cores[CURRENT_CORE].running; | 1653 | struct thread_entry *current = cores[CURRENT_CORE].running; |
738 | 1654 | ||
739 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 1655 | #if NUM_CORES > 1 |
1656 | /* Lock thread slot */ | ||
1657 | GET_THREAD_STATE(current); | ||
1658 | #endif | ||
1659 | |||
740 | /* A block with a timeout is a sleep situation, whatever we are waiting | 1660 | /* A block with a timeout is a sleep situation, whatever we are waiting |
741 | * for _may or may not_ happen, regardless of boost state, (user input | 1661 | * for _may or may not_ happen, regardless of boost state, (user input |
742 | * for instance), so this thread no longer needs to boost */ | 1662 | * for instance), so this thread no longer needs to boost */ |
743 | if (STATE_IS_BOOSTED(current->statearg)) | 1663 | unboost_thread(current); |
744 | { | ||
745 | boosted_threads--; | ||
746 | if (!boosted_threads) | ||
747 | { | ||
748 | cpu_boost(false); | ||
749 | } | ||
750 | } | ||
751 | #endif | ||
752 | |||
753 | /* We can store only one thread to the "list" if thread is used | ||
754 | * in other list (such as core's list for sleeping tasks). */ | ||
755 | THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current); | ||
756 | 1664 | ||
757 | /* Set the state to blocked with the specified timeout */ | 1665 | /* Set the state to blocked with the specified timeout */ |
758 | SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout); | 1666 | current->tmo_tick = current_tick + timeout; |
759 | 1667 | /* Set the list for explicit wakeup */ | |
760 | /* Set the "list" for explicit wakeup */ | 1668 | block_thread_on_l(list, current, STATE_BLOCKED_W_TMO); |
761 | *list = current; | ||
762 | 1669 | ||
763 | /* Now force a task switch and block until we have been woken up | 1670 | /* Now force a task switch and block until we have been woken up |
764 | * by another thread or timeout is reached. */ | 1671 | * by another thread or timeout is reached - whichever happens first */ |
765 | switch_thread(true, NULL); | 1672 | switch_thread(current); |
766 | 1673 | ||
767 | /* It is now safe for another thread to block on this "list" */ | 1674 | /* Our status should be STATE_RUNNING */ |
768 | *list = NULL; | 1675 | THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING, |
1676 | "T:R->!*R", current); | ||
769 | } | 1677 | } |
770 | 1678 | ||
771 | #if !defined(SIMULATOR) | 1679 | /*--------------------------------------------------------------------------- |
772 | void set_irq_level_and_block_thread(struct thread_entry **list, int level) | 1680 | * Explicitly wake up a thread on a blocking queue. Has no effect on threads |
1681 | * that called sleep(). | ||
1682 | * Caller with interrupt-accessible lists should disable interrupts first. | ||
1683 | * This code should be considered a critical section by the caller. | ||
1684 | * | ||
1685 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
1686 | *--------------------------------------------------------------------------- | ||
1687 | */ | ||
1688 | IF_SWCL(static inline) struct thread_entry * _wakeup_thread( | ||
1689 | struct thread_queue *list IF_SWCL(, const bool nolock)) | ||
773 | { | 1690 | { |
774 | cores[CURRENT_CORE].switch_to_irq_level = level; | 1691 | struct thread_entry *t; |
775 | block_thread(list); | 1692 | struct thread_entry *thread; |
776 | } | 1693 | unsigned state; |
777 | 1694 | ||
778 | void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list, | 1695 | /* Wake up the last thread first. */ |
779 | int timeout, int level) | 1696 | #if CONFIG_CORELOCK == SW_CORELOCK |
780 | { | 1697 | /* One branch optimized away during inlining */ |
781 | cores[CURRENT_CORE].switch_to_irq_level = level; | 1698 | if (nolock) |
782 | block_thread_w_tmo(list, timeout); | 1699 | { |
783 | } | 1700 | t = list->queue; |
1701 | } | ||
1702 | else | ||
784 | #endif | 1703 | #endif |
1704 | { | ||
1705 | t = LOCK_LIST(list); | ||
1706 | } | ||
785 | 1707 | ||
786 | void wakeup_thread(struct thread_entry **list) | ||
787 | { | ||
788 | struct thread_entry *thread; | ||
789 | |||
790 | /* Check if there is a blocked thread at all. */ | 1708 | /* Check if there is a blocked thread at all. */ |
791 | if (*list == NULL) | 1709 | if (t == NULL) |
792 | { | 1710 | { |
793 | return ; | 1711 | #if CONFIG_CORELOCK == SW_CORELOCK |
1712 | if (!nolock) | ||
1713 | #endif | ||
1714 | { | ||
1715 | UNLOCK_LIST(list, NULL); | ||
1716 | } | ||
1717 | return NULL; | ||
794 | } | 1718 | } |
795 | 1719 | ||
796 | /* Wake up the last thread first. */ | 1720 | thread = t; |
797 | thread = *list; | 1721 | |
798 | 1722 | #if NUM_CORES > 1 | |
1723 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1724 | if (nolock) | ||
1725 | { | ||
1726 | /* Lock thread only, not list */ | ||
1727 | state = GET_THREAD_STATE(thread); | ||
1728 | } | ||
1729 | else | ||
1730 | #endif | ||
1731 | { | ||
1732 | /* This locks in reverse order from other routines so a retry in the | ||
1733 | correct order may be needed */ | ||
1734 | state = TRY_GET_THREAD_STATE(thread); | ||
1735 | if (state == STATE_BUSY) | ||
1736 | { | ||
1737 | /* Unlock list and retry slot, then list */ | ||
1738 | UNLOCK_LIST(list, t); | ||
1739 | state = GET_THREAD_STATE(thread); | ||
1740 | t = LOCK_LIST(list); | ||
1741 | /* Be sure thread still exists here - it couldn't have re-added | ||
1742 | itself if it was woken elsewhere because this function is | ||
1743 | serialized within the object that owns the list. */ | ||
1744 | if (thread != t) | ||
1745 | { | ||
1746 | /* Thread disappeared :( */ | ||
1747 | UNLOCK_LIST(list, t); | ||
1748 | UNLOCK_THREAD(thread, state); | ||
1749 | return THREAD_WAKEUP_MISSING; /* Indicate disappearance */ | ||
1750 | } | ||
1751 | } | ||
1752 | } | ||
1753 | #else /* NUM_CORES == 1 */ | ||
1754 | state = GET_THREAD_STATE(thread); | ||
1755 | #endif /* NUM_CORES */ | ||
1756 | |||
799 | /* Determine thread's current state. */ | 1757 | /* Determine thread's current state. */ |
800 | switch (GET_STATE(thread->statearg)) | 1758 | switch (state) |
801 | { | 1759 | { |
802 | case STATE_BLOCKED: | 1760 | case STATE_BLOCKED: |
803 | /* Remove thread from the list of blocked threads and add it | 1761 | case STATE_BLOCKED_W_TMO: |
804 | * to the scheduler's list of running processes. List removal | 1762 | /* Remove thread from object's blocked list - select t or list depending |
805 | * is safe since each object maintains its own list of | 1763 | on locking type at compile time */ |
806 | * sleepers and queues protect against reentrancy. */ | 1764 | REMOVE_FROM_LIST_L_SELECT(t, list, thread); |
807 | remove_from_list(list, thread); | 1765 | #if CONFIG_CORELOCK == SW_CORELOCK |
808 | add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread); | 1766 | /* Statement optimized away during inlining if nolock != false */ |
809 | 1767 | if (!nolock) | |
810 | case STATE_BLOCKED_W_TMO: | 1768 | #endif |
811 | /* Just remove the timeout to cause scheduler to immediately | 1769 | { |
812 | * wake up the thread. */ | 1770 | UNLOCK_LIST(list, t); /* Unlock list - removal complete */ |
813 | thread->statearg = 0; | 1771 | } |
814 | break; | 1772 | |
815 | 1773 | #ifdef HAVE_PRIORITY_SCHEDULING | |
816 | default: | 1774 | /* Give the task a kick to avoid a stall after wakeup. |
817 | /* Nothing to do. Thread has already been woken up | 1775 | Not really proper treatment - TODO later. */ |
818 | * or it's state is not blocked or blocked with timeout. */ | 1776 | thread->last_run = current_tick - 8*LOWEST_PRIORITY; |
819 | return ; | 1777 | #endif |
1778 | core_schedule_wakeup(thread); | ||
1779 | UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); | ||
1780 | return thread; | ||
1781 | default: | ||
1782 | /* Nothing to do. State is not blocked. */ | ||
1783 | #if THREAD_EXTRA_CHECKS | ||
1784 | THREAD_PANICF("wakeup_thread->block invalid", thread); | ||
1785 | case STATE_RUNNING: | ||
1786 | case STATE_KILLED: | ||
1787 | #endif | ||
1788 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1789 | /* Statement optimized away during inlining if nolock != false */ | ||
1790 | if (!nolock) | ||
1791 | #endif | ||
1792 | { | ||
1793 | UNLOCK_LIST(list, t); /* Unlock the object's list */ | ||
1794 | } | ||
1795 | UNLOCK_THREAD(thread, state); /* Unlock thread slot */ | ||
1796 | return NULL; | ||
820 | } | 1797 | } |
821 | } | 1798 | } |
822 | 1799 | ||
823 | inline static int find_empty_thread_slot(void) | 1800 | #if CONFIG_CORELOCK == SW_CORELOCK |
1801 | /* Inline lock/nolock version of _wakeup_thread into these functions */ | ||
1802 | struct thread_entry * wakeup_thread(struct thread_queue *tq) | ||
1803 | { | ||
1804 | return _wakeup_thread(tq, false); | ||
1805 | } | ||
1806 | |||
1807 | struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list) | ||
1808 | { | ||
1809 | return _wakeup_thread((struct thread_queue *)list, true); | ||
1810 | } | ||
1811 | #endif /* CONFIG_CORELOCK */ | ||
1812 | |||
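_wakeup_thread takes the list lock before the thread lock, the reverse of the order used elsewhere, which is why it only try-locks the thread slot and, on contention, releases the list and re-acquires both in the canonical order before re-validating. The same deadlock-avoidance shape with POSIX mutexes standing in for corelocks (lock names illustrative):

    #include <pthread.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;

    static void wake_one(void)
    {
        pthread_mutex_lock(&list_lock);

        if (pthread_mutex_trylock(&thread_lock) != 0) {
            /* Contended: someone holds the thread lock and may be about
             * to take the list lock. Back off and retake both in the
             * canonical thread-then-list order. */
            pthread_mutex_unlock(&list_lock);
            pthread_mutex_lock(&thread_lock);
            pthread_mutex_lock(&list_lock);
            /* The real code re-checks here that the thread is still
             * first on the list; it may have been woken meanwhile. */
        }

        /* ... move the thread toward the waking list ... */

        pthread_mutex_unlock(&thread_lock);
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        wake_one();
        return 0;
    }

In thread.c the re-validation failure is reported as THREAD_WAKEUP_MISSING so the object that owns the list can account for the vanished thread.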
1813 | /*--------------------------------------------------------------------------- | ||
1814 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned | ||
1815 | * will be locked on multicore. | ||
1816 | *--------------------------------------------------------------------------- | ||
1817 | */ | ||
1818 | static int find_empty_thread_slot(void) | ||
824 | { | 1819 | { |
1820 | #if NUM_CORES > 1 | ||
1821 | /* Any slot could be on an IRQ-accessible list */ | ||
1822 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
1823 | #endif | ||
1824 | /* Thread slots are not locked on single core */ | ||
1825 | |||
825 | int n; | 1826 | int n; |
826 | 1827 | ||
827 | for (n = 0; n < MAXTHREADS; n++) | 1828 | for (n = 0; n < MAXTHREADS; n++) |
828 | { | 1829 | { |
829 | if (threads[n].name == NULL) | 1830 | /* Obtain current slot state - lock it on multicore */ |
830 | return n; | 1831 | unsigned state = GET_THREAD_STATE(&threads[n]); |
1832 | |||
1833 | if (state == STATE_KILLED | ||
1834 | #if NUM_CORES > 1 | ||
1835 | && threads[n].name != THREAD_DESTRUCT | ||
1836 | #endif | ||
1837 | ) | ||
1838 | { | ||
1839 | /* Slot is empty - leave it locked and caller will unlock */ | ||
1840 | break; | ||
1841 | } | ||
1842 | |||
1843 | /* Finished examining slot - no longer busy - unlock on multicore */ | ||
1844 | UNLOCK_THREAD(&threads[n], state); | ||
831 | } | 1845 | } |
832 | 1846 | ||
833 | return -1; | 1847 | #if NUM_CORES > 1 |
1848 | set_irq_level(oldlevel); /* Reenable interrups - this slot is | ||
1849 | not accesible to them yet */ | ||
1850 | #endif | ||
1851 | |||
1852 | return n; | ||
834 | } | 1853 | } |
835 | 1854 | ||
836 | /* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled | 1855 | |
837 | before calling. */ | 1856 | /*--------------------------------------------------------------------------- |
838 | void wakeup_thread_irq_safe(struct thread_entry **list) | 1857 | * Place the current core in idle mode - woken up on interrupt or wake |
1858 | * request from another core. | ||
1859 | *--------------------------------------------------------------------------- | ||
1860 | */ | ||
1861 | void core_idle(void) | ||
839 | { | 1862 | { |
840 | struct core_entry *core = &cores[CURRENT_CORE]; | 1863 | const unsigned int core = CURRENT_CORE; |
841 | /* Switch wakeup lists and call wakeup_thread */ | 1864 | core_sleep(IF_COP(core,) &cores[core].waking.queue); |
842 | core->wakeup_list = &core->waking; | ||
843 | wakeup_thread(list); | ||
844 | /* Switch back to normal running list */ | ||
845 | core->wakeup_list = &core->running; | ||
846 | } | 1865 | } |
847 | 1866 | ||
848 | /*--------------------------------------------------------------------------- | 1867 | /*--------------------------------------------------------------------------- |
@@ -854,44 +1873,23 @@ void wakeup_thread_irq_safe(struct thread_entry **list) | |||
854 | */ | 1873 | */ |
855 | struct thread_entry* | 1874 | struct thread_entry* |
856 | create_thread(void (*function)(void), void* stack, int stack_size, | 1875 | create_thread(void (*function)(void), void* stack, int stack_size, |
857 | const char *name IF_PRIO(, int priority) | 1876 | unsigned flags, const char *name |
858 | IF_COP(, unsigned int core, bool fallback)) | 1877 | IF_PRIO(, int priority) |
1878 | IF_COP(, unsigned int core)) | ||
859 | { | 1879 | { |
860 | unsigned int i; | 1880 | unsigned int i; |
861 | unsigned int stacklen; | 1881 | unsigned int stacklen; |
862 | unsigned int *stackptr; | 1882 | unsigned int *stackptr; |
863 | int slot; | 1883 | int slot; |
864 | struct thread_entry *thread; | 1884 | struct thread_entry *thread; |
865 | 1885 | unsigned state; | |
866 | /***** | ||
867 | * Ugly code alert! | ||
868 | * To prevent ifdef hell while keeping the binary size down, we define | ||
869 | * core here if it hasn't been passed as a parameter | ||
870 | *****/ | ||
871 | #if NUM_CORES == 1 | ||
872 | #define core CPU | ||
873 | #endif | ||
874 | |||
875 | #if NUM_CORES > 1 | ||
876 | /* If the kernel hasn't initialised on the COP (most likely due to an old | ||
877 | * bootloader) then refuse to start threads on the COP | ||
878 | */ | ||
879 | if ((core == COP) && !cores[core].kernel_running) | ||
880 | { | ||
881 | if (fallback) | ||
882 | return create_thread(function, stack, stack_size, name | ||
883 | IF_PRIO(, priority) IF_COP(, CPU, false)); | ||
884 | else | ||
885 | return NULL; | ||
886 | } | ||
887 | #endif | ||
888 | 1886 | ||
889 | slot = find_empty_thread_slot(); | 1887 | slot = find_empty_thread_slot(); |
890 | if (slot < 0) | 1888 | if (slot >= MAXTHREADS) |
891 | { | 1889 | { |
892 | return NULL; | 1890 | return NULL; |
893 | } | 1891 | } |
894 | 1892 | ||
895 | /* Munge the stack to make it easy to spot stack overflows */ | 1893 | /* Munge the stack to make it easy to spot stack overflows */ |
896 | stacklen = stack_size / sizeof(int); | 1894 | stacklen = stack_size / sizeof(int); |
897 | stackptr = stack; | 1895 | stackptr = stack; |
@@ -905,11 +1903,19 @@ struct thread_entry* | |||
905 | thread->name = name; | 1903 | thread->name = name; |
906 | thread->stack = stack; | 1904 | thread->stack = stack; |
907 | thread->stack_size = stack_size; | 1905 | thread->stack_size = stack_size; |
908 | thread->statearg = 0; | 1906 | thread->bqp = NULL; |
1907 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
1908 | thread->bqnlp = NULL; | ||
1909 | #endif | ||
1910 | thread->queue = NULL; | ||
1911 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
1912 | thread->boosted = 0; | ||
1913 | #endif | ||
909 | #ifdef HAVE_PRIORITY_SCHEDULING | 1914 | #ifdef HAVE_PRIORITY_SCHEDULING |
910 | thread->priority_x = 0; | 1915 | thread->priority_x = LOWEST_PRIORITY; |
911 | thread->priority = priority; | 1916 | thread->priority = priority; |
912 | cores[core].highest_priority = 100; | 1917 | thread->last_run = current_tick - priority * 8; |
1918 | cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY; | ||
913 | #endif | 1919 | #endif |
914 | 1920 | ||
915 | #if NUM_CORES > 1 | 1921 | #if NUM_CORES > 1 |
@@ -921,6 +1927,12 @@ struct thread_entry* | |||
921 | flush_icache(); | 1927 | flush_icache(); |
922 | } | 1928 | } |
923 | #endif | 1929 | #endif |
1930 | |||
1931 | /* Thread is not on any timeout list yet, but be a bit paranoid */ | ||
1932 | thread->tmo.prev = NULL; | ||
1933 | |||
1934 | state = (flags & CREATE_THREAD_FROZEN) ? | ||
1935 | STATE_FROZEN : STATE_RUNNING; | ||
924 | 1936 | ||
925 | /* Align stack to an even 32 bit boundary */ | 1937 | /* Align stack to an even 32 bit boundary */ |
926 | thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3); | 1938 | thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3); |
@@ -928,50 +1940,149 @@ struct thread_entry* | |||
928 | /* Load the thread's context structure with needed startup information */ | 1940 | /* Load the thread's context structure with needed startup information */ |
929 | THREAD_STARTUP_INIT(core, thread, function); | 1941 | THREAD_STARTUP_INIT(core, thread, function); |
930 | 1942 | ||
931 | add_to_list(&cores[core].running, thread); | 1943 | if (state == STATE_RUNNING) |
1944 | { | ||
1945 | #if NUM_CORES > 1 | ||
1946 | if (core != CURRENT_CORE) | ||
1947 | { | ||
1948 | /* Next task switch on other core moves thread to running list */ | ||
1949 | core_schedule_wakeup(thread); | ||
1950 | } | ||
1951 | else | ||
1952 | #endif | ||
1953 | { | ||
1954 | /* Place on running list immediately */ | ||
1955 | add_to_list_l(&cores[IF_COP_CORE(core)].running, thread); | ||
1956 | } | ||
1957 | } | ||
932 | 1958 | ||
1959 | /* remove lock and set state */ | ||
1960 | UNLOCK_THREAD_SET_STATE(thread, state); | ||
1961 | |||
933 | return thread; | 1962 | return thread; |
934 | #if NUM_CORES == 1 | ||
935 | #undef core | ||
936 | #endif | ||
937 | } | 1963 | } |
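Call sites change to match the new signature: a flags word (0 or CREATE_THREAD_FROZEN) now precedes the name and the old fallback boolean is gone, as the usb.c hunk at the end of this diff shows:

    create_thread(usb_thread, usb_stack, sizeof(usb_stack), 0,
                  usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)
                  IF_COP(, CPU));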
938 | 1964 | ||
939 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 1965 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
940 | void trigger_cpu_boost(void) | 1966 | void trigger_cpu_boost(void) |
941 | { | 1967 | { |
942 | if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg)) | 1968 | /* No IRQ disable necessary since the current thread cannot be blocked |
1969 | on an IRQ-accessible list */ | ||
1970 | struct thread_entry *current = cores[CURRENT_CORE].running; | ||
1971 | unsigned state; | ||
1972 | |||
1973 | state = GET_THREAD_STATE(current); | ||
1974 | |||
1975 | if (current->boosted == 0) | ||
943 | { | 1976 | { |
944 | SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg); | 1977 | current->boosted = 1; |
945 | if (!boosted_threads) | 1978 | if (++boosted_threads == 1) |
946 | { | 1979 | { |
947 | cpu_boost(true); | 1980 | cpu_boost(true); |
948 | } | 1981 | } |
949 | boosted_threads++; | ||
950 | } | 1982 | } |
1983 | |||
1984 | UNLOCK_THREAD(current, state); | ||
1985 | (void)state; | ||
951 | } | 1986 | } |
952 | #endif | 1987 | #endif /* HAVE_SCHEDULER_BOOSTCTRL */ |
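An illustrative model of the reference counting above - the matching unboost path is not part of this hunk, so its shape here is an assumption: only the first boost and the last unboost actually toggle the CPU frequency.

    static int boost_count;          /* plays the role of boosted_threads */

    static void boost_model(bool on)
    {
        if (on)
        {
            if (++boost_count == 1)
                cpu_boost(true);     /* first booster switches boost on */
        }
        else if (--boost_count == 0)
        {
            cpu_boost(false);        /* last unbooster switches it off */
        }
    }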
953 | 1988 | ||
954 | /*--------------------------------------------------------------------------- | 1989 | /*--------------------------------------------------------------------------- |
955 | * Remove a thread on the current core from the scheduler. | 1990 | * Remove a thread from the scheduler. |
956 | * Parameter is the ID as returned from create_thread(). | 1991 | * Parameter is the ID as returned from create_thread(). |
1992 | * | ||
1993 | * Use with care on threads that are not under careful control as this may | ||
1994 | * leave various objects in an undefined state. When trying to kill a thread | ||
1995 | * on another processor, be sure you know what it's doing and that it won't | ||
1996 | * be switching cores itself. | ||
957 | *--------------------------------------------------------------------------- | 1997 | *--------------------------------------------------------------------------- |
958 | */ | 1998 | */ |
959 | void remove_thread(struct thread_entry *thread) | 1999 | void remove_thread(struct thread_entry *thread) |
960 | { | 2000 | { |
2001 | #if NUM_CORES > 1 | ||
2002 | /* core is not constant here because of core switching */ | ||
2003 | unsigned int core = CURRENT_CORE; | ||
2004 | unsigned int old_core = NUM_CORES; | ||
2005 | #else | ||
961 | const unsigned int core = CURRENT_CORE; | 2006 | const unsigned int core = CURRENT_CORE; |
2007 | #endif | ||
2008 | unsigned state; | ||
2009 | int oldlevel; | ||
962 | 2010 | ||
963 | if (thread == NULL) | 2011 | if (thread == NULL) |
964 | thread = cores[core].running; | 2012 | thread = cores[core].running; |
965 | 2013 | ||
966 | /* Free the entry by removing thread name. */ | 2014 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); |
967 | thread->name = NULL; | 2015 | state = GET_THREAD_STATE(thread); |
2016 | |||
2017 | if (state == STATE_KILLED) | ||
2018 | { | ||
2019 | goto thread_killed; | ||
2020 | } | ||
2021 | |||
2022 | #if NUM_CORES > 1 | ||
2023 | if (thread->core != core) | ||
2024 | { | ||
2025 | /* Switch cores and safely extract the thread there */ | ||
2026 | /* Slot HAS to be unlocked or a deadlock could occur - potential livelock | ||
2027 | condition if the thread runs away to another processor. */ | ||
2028 | unsigned int new_core = thread->core; | ||
2029 | const char *old_name = thread->name; | ||
2030 | |||
2031 | thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */ | ||
2032 | UNLOCK_THREAD(thread, state); | ||
2033 | set_irq_level(oldlevel); | ||
2034 | |||
2035 | old_core = switch_core(new_core); | ||
2036 | |||
2037 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
2038 | state = GET_THREAD_STATE(thread); | ||
2039 | |||
2040 | core = new_core; | ||
2041 | |||
2042 | if (state == STATE_KILLED) | ||
2043 | { | ||
2044 | /* Thread suicided before we could kill it */ | ||
2045 | goto thread_killed; | ||
2046 | } | ||
2047 | |||
2048 | /* Reopen slot - it's locked again anyway */ | ||
2049 | thread->name = old_name; | ||
2050 | |||
2051 | if (thread->core != core) | ||
2052 | { | ||
2053 | /* We won't play thread tag - just forget it */ | ||
2054 | UNLOCK_THREAD(thread, state); | ||
2055 | set_irq_level(oldlevel); | ||
2056 | goto thread_kill_abort; | ||
2057 | } | ||
2058 | |||
2059 | /* Perform the extraction and switch ourselves back to the original | ||
2060 | processor */ | ||
2061 | } | ||
2062 | #endif /* NUM_CORES > 1 */ | ||
2063 | |||
968 | #ifdef HAVE_PRIORITY_SCHEDULING | 2064 | #ifdef HAVE_PRIORITY_SCHEDULING |
969 | cores[IF_COP2(thread->core)].highest_priority = 100; | 2065 | cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY; |
970 | #endif | 2066 | #endif |
971 | 2067 | if (thread->tmo.prev != NULL) | |
972 | if (thread == cores[IF_COP2(thread->core)].running) | ||
973 | { | 2068 | { |
974 | remove_from_list(&cores[IF_COP2(thread->core)].running, thread); | 2069 | /* Clean thread off the timeout list if a timeout check hasn't |
2070 | * run yet */ | ||
2071 | remove_from_list_tmo(thread); | ||
2072 | } | ||
2073 | |||
2074 | if (thread == cores[core].running) | ||
2075 | { | ||
2076 | /* Suicide - thread has unconditional rights to do this */ | ||
2077 | /* Maintain locks until switch-out */ | ||
2078 | #if NUM_CORES > 1 | ||
2079 | cores[core].blk_ops.flags = TBOP_IRQ_LEVEL; | ||
2080 | cores[core].blk_ops.irq_level = oldlevel; | ||
2081 | #else | ||
2082 | cores[core].irq_level = oldlevel; | ||
2083 | #endif | ||
2084 | block_thread_on_l(NULL, thread, STATE_KILLED); | ||
2085 | |||
975 | #if NUM_CORES > 1 | 2086 | #if NUM_CORES > 1 |
976 | /* Switch to the idle stack if not on the main core (where "main" | 2087 | /* Switch to the idle stack if not on the main core (where "main" |
977 | * runs) */ | 2088 | * runs) */ |
@@ -982,55 +2093,347 @@ void remove_thread(struct thread_entry *thread) | |||
982 | 2093 | ||
983 | flush_icache(); | 2094 | flush_icache(); |
984 | #endif | 2095 | #endif |
985 | switch_thread(false, NULL); | 2096 | /* Signal this thread */ |
2097 | thread_queue_wake_no_listlock(&thread->queue); | ||
2098 | /* Switch tasks and never return */ | ||
2099 | switch_thread(thread); | ||
986 | /* This should never and must never be reached - if it is, the | 2100 | /* This should never and must never be reached - if it is, the |
987 | * state is corrupted */ | 2101 | * state is corrupted */ |
988 | THREAD_PANICF("remove_thread->K:*R", thread); | 2102 | THREAD_PANICF("remove_thread->K:*R", thread); |
989 | } | 2103 | } |
990 | 2104 | ||
991 | if (thread == cores[IF_COP2(thread->core)].sleeping) | 2105 | #if NUM_CORES > 1 |
992 | remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread); | 2106 | if (thread->name == THREAD_DESTRUCT) |
2107 | { | ||
2108 | /* Another core is doing this operation already */ | ||
2109 | UNLOCK_THREAD(thread, state); | ||
2110 | set_irq_level(oldlevel); | ||
2111 | return; | ||
2112 | } | ||
2113 | #endif | ||
2114 | if (cores[core].waking.queue != NULL) | ||
2115 | { | ||
2116 | /* Get any threads off the waking list and onto the running | ||
2117 | * list first - waking and running cannot be distinguished by | ||
2118 | * state */ | ||
2119 | core_perform_wakeup(IF_COP(core)); | ||
2120 | } | ||
2121 | |||
2122 | switch (state) | ||
2123 | { | ||
2124 | case STATE_RUNNING: | ||
2125 | /* Remove thread from ready to run tasks */ | ||
2126 | remove_from_list_l(&cores[core].running, thread); | ||
2127 | break; | ||
2128 | case STATE_BLOCKED: | ||
2129 | case STATE_BLOCKED_W_TMO: | ||
2130 | /* Remove thread from the queue it's blocked on - including its | ||
2131 | * own if waiting there */ | ||
2132 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
2133 | /* One or the other will be valid */ | ||
2134 | if (thread->bqp == NULL) | ||
2135 | { | ||
2136 | remove_from_list_l(thread->bqnlp, thread); | ||
2137 | } | ||
2138 | else | ||
2139 | #endif /* CONFIG_CORELOCK */ | ||
2140 | { | ||
2141 | remove_from_list_l_locked(thread->bqp, thread); | ||
2142 | } | ||
2143 | break; | ||
2144 | /* Otherwise thread is killed or is frozen and hasn't run yet */ | ||
2145 | } | ||
2146 | |||
2147 | /* If thread was waiting on itself, it will have been removed above. | ||
2148 | * The wrong order would result in waking the thread first and deadlocking | ||
2149 | * since the slot is already locked. */ | ||
2150 | thread_queue_wake_no_listlock(&thread->queue); | ||
2151 | |||
2152 | thread_killed: /* Thread was already killed */ | ||
2153 | /* Removal complete - safe to unlock state and reenable interrupts */ | ||
2154 | UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED); | ||
2155 | set_irq_level(oldlevel); | ||
2156 | |||
2157 | #if NUM_CORES > 1 | ||
2158 | thread_kill_abort: /* Something stopped us from killing the thread */ | ||
2159 | if (old_core < NUM_CORES) | ||
2160 | { | ||
2161 | /* Did a removal on another processor's thread - switch back to | ||
2162 | native core */ | ||
2163 | switch_core(old_core); | ||
2164 | } | ||
2165 | #endif | ||
2166 | } | ||
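Per the comments above, the two usual call patterns are self-removal and killing by handle (sketch; worker stands for a handle returned by create_thread()):

    /* Either: end the current thread - this call never returns */
    remove_thread(NULL);
    /* Or: kill another thread, possibly running on the other core */
    remove_thread(worker);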
2167 | |||
2168 | /*--------------------------------------------------------------------------- | ||
2169 | * Block the current thread until another thread terminates. A thread may | ||
2170 | * wait on itself to terminate, which prevents it from running again; it | ||
2171 | * will then need to be killed externally. | ||
2172 | * Parameter is the ID as returned from create_thread(). | ||
2173 | *--------------------------------------------------------------------------- | ||
2174 | */ | ||
2175 | void thread_wait(struct thread_entry *thread) | ||
2176 | { | ||
2177 | const unsigned int core = CURRENT_CORE; | ||
2178 | struct thread_entry *current = cores[core].running; | ||
2179 | unsigned thread_state; | ||
2180 | #if NUM_CORES > 1 | ||
2181 | int oldlevel; | ||
2182 | unsigned current_state; | ||
2183 | #endif | ||
2184 | |||
2185 | if (thread == NULL) | ||
2186 | thread = current; | ||
2187 | |||
2188 | #if NUM_CORES > 1 | ||
2189 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
2190 | #endif | ||
2191 | |||
2192 | thread_state = GET_THREAD_STATE(thread); | ||
2193 | |||
2194 | #if NUM_CORES > 1 | ||
2195 | /* We can't lock the same slot twice. The waitee will also lock its own | ||
2196 | slot first, then the thread slots that will be locked and woken in turn. | ||
2197 | The same order must be observed here as well. */ | ||
2198 | if (thread == current) | ||
2199 | { | ||
2200 | current_state = thread_state; | ||
2201 | } | ||
993 | else | 2202 | else |
994 | remove_from_list(NULL, thread); | 2203 | { |
2204 | current_state = GET_THREAD_STATE(current); | ||
2205 | } | ||
2206 | #endif | ||
2207 | |||
2208 | if (thread_state != STATE_KILLED) | ||
2209 | { | ||
2210 | #if NUM_CORES > 1 | ||
2211 | cores[core].blk_ops.flags = TBOP_IRQ_LEVEL; | ||
2212 | cores[core].blk_ops.irq_level = oldlevel; | ||
2213 | #endif | ||
2214 | /* Unlock the waitee state at task switch - not done for self-wait | ||
2215 | because that would double-unlock the state and potentially | ||
2216 | corrupt another's busy assert on the slot */ | ||
2217 | if (thread != current) | ||
2218 | { | ||
2219 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
2220 | cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD; | ||
2221 | cores[core].blk_ops.thread = thread; | ||
2222 | #elif CONFIG_CORELOCK == CORELOCK_SWAP | ||
2223 | cores[core].blk_ops.flags |= TBOP_SET_VARu8; | ||
2224 | cores[core].blk_ops.var_u8p = &thread->state; | ||
2225 | cores[core].blk_ops.var_u8v = thread_state; | ||
2226 | #endif | ||
2227 | } | ||
2228 | block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED); | ||
2229 | switch_thread(current); | ||
2230 | return; | ||
2231 | } | ||
2232 | |||
2233 | /* Unlock both slots - obviously the current thread can't have | ||
2234 | STATE_KILLED so the above if clause will always catch a thread | ||
2235 | waiting on itself */ | ||
2236 | #if NUM_CORES > 1 | ||
2237 | UNLOCK_THREAD(current, current_state); | ||
2238 | UNLOCK_THREAD(thread, thread_state); | ||
2239 | set_irq_level(oldlevel); | ||
2240 | #endif | ||
995 | } | 2241 | } |
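A hypothetical join pattern built on the calls above (worker_func, worker_stack and the exit signalling are illustrative, not part of this diff; PRIORITY_BACKGROUND is assumed to be the usual priority constant). The waiter blocks on the victim's queue, and remove_thread() wakes it via thread_queue_wake_no_listlock():

    struct thread_entry *worker =
        create_thread(worker_func, worker_stack, sizeof(worker_stack), 0,
                      "worker" IF_PRIO(, PRIORITY_BACKGROUND)
                      IF_COP(, CPU));
    if (worker != NULL)
    {
        /* ... signal the worker; it finishes and calls remove_thread(NULL) ... */
        thread_wait(worker);  /* returns once the slot reaches STATE_KILLED */
    }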
996 | 2242 | ||
997 | #ifdef HAVE_PRIORITY_SCHEDULING | 2243 | #ifdef HAVE_PRIORITY_SCHEDULING |
2244 | /*--------------------------------------------------------------------------- | ||
2245 | * Sets the thread's relative priority for the core it runs on. | ||
2246 | *--------------------------------------------------------------------------- | ||
2247 | */ | ||
998 | int thread_set_priority(struct thread_entry *thread, int priority) | 2248 | int thread_set_priority(struct thread_entry *thread, int priority) |
999 | { | 2249 | { |
1000 | int old_priority; | 2250 | unsigned old_priority = (unsigned)-1; |
1001 | 2251 | ||
1002 | if (thread == NULL) | 2252 | if (thread == NULL) |
1003 | thread = cores[CURRENT_CORE].running; | 2253 | thread = cores[CURRENT_CORE].running; |
1004 | 2254 | ||
1005 | old_priority = thread->priority; | 2255 | #if NUM_CORES > 1 |
1006 | thread->priority = priority; | 2256 | /* Thread could be on any list and therefore on an interrupt accessible |
1007 | cores[IF_COP2(thread->core)].highest_priority = 100; | 2257 | one - disable interrupts */ |
1008 | 2258 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | |
2259 | #endif | ||
2260 | unsigned state = GET_THREAD_STATE(thread); | ||
2261 | |||
2262 | /* Make sure it's not killed */ | ||
2263 | if (state != STATE_KILLED) | ||
2264 | { | ||
2265 | old_priority = thread->priority; | ||
2266 | thread->priority = priority; | ||
2267 | cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY; | ||
2268 | } | ||
2269 | |||
2270 | #if NUM_CORES > 1 | ||
2271 | UNLOCK_THREAD(thread, state); | ||
2272 | set_irq_level(oldlevel); | ||
2273 | #endif | ||
1009 | return old_priority; | 2274 | return old_priority; |
1010 | } | 2275 | } |
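A common pattern the return value enables (sketch; PRIORITY_REALTIME is assumed to be the existing priority constant): raise the current thread for a latency-sensitive section, then restore the old value.

    int old = thread_set_priority(NULL, PRIORITY_REALTIME);
    /* ... time-critical work ... */
    thread_set_priority(NULL, old);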
1011 | 2276 | ||
2277 | /*--------------------------------------------------------------------------- | ||
2278 | * Returns the current priority for a thread. | ||
2279 | *--------------------------------------------------------------------------- | ||
2280 | */ | ||
1012 | int thread_get_priority(struct thread_entry *thread) | 2281 | int thread_get_priority(struct thread_entry *thread) |
1013 | { | 2282 | { |
2283 | /* Simple, quick probe. */ | ||
1014 | if (thread == NULL) | 2284 | if (thread == NULL) |
1015 | thread = cores[CURRENT_CORE].running; | 2285 | thread = cores[CURRENT_CORE].running; |
1016 | 2286 | ||
1017 | return thread->priority; | 2287 | return (unsigned)thread->priority; |
1018 | } | 2288 | } |
1019 | 2289 | ||
2290 | /*--------------------------------------------------------------------------- | ||
2291 | * Yield that guarantees thread execution once per round regardless of | ||
2292 | * the thread's scheduler priority - basically a transient realtime boost | ||
2293 | * without altering the scheduler's thread precedence. | ||
2294 | * | ||
2295 | * HACK ALERT! Search for "priority inheritance" for proper treatment. | ||
2296 | *--------------------------------------------------------------------------- | ||
2297 | */ | ||
1020 | void priority_yield(void) | 2298 | void priority_yield(void) |
1021 | { | 2299 | { |
1022 | struct thread_entry *thread = cores[CURRENT_CORE].running; | 2300 | const unsigned int core = CURRENT_CORE; |
1023 | thread->priority_x = 1; | 2301 | struct thread_entry *thread = cores[core].running; |
1024 | switch_thread(true, NULL); | 2302 | thread->priority_x = HIGHEST_PRIORITY; |
1025 | thread->priority_x = 0; | 2303 | switch_thread(NULL); |
2304 | thread->priority_x = LOWEST_PRIORITY; | ||
2305 | cores[core].highest_priority = LOWEST_PRIORITY; | ||
1026 | } | 2306 | } |
1027 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 2307 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
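Typical use (sketch; process_chunk() is an illustrative helper): a low-priority loop that must still make progress once per scheduler round.

    bool done = false;
    while (!done)
    {
        done = process_chunk();  /* hypothetical unit of work */
        priority_yield();        /* guaranteed another slice next round */
    }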
1028 | 2308 | ||
2309 | /* Resumes a frozen thread - similar logic to wakeup_thread except that | ||
2310 | the thread is on no scheduler list at all. It exists simply by virtue of | ||
2311 | the slot having a state of STATE_FROZEN. */ | ||
2312 | void thread_thaw(struct thread_entry *thread) | ||
2313 | { | ||
2314 | #if NUM_CORES > 1 | ||
2315 | /* Thread could be on any list and therefore on an interrupt accessible | ||
2316 | one - disable interrupts */ | ||
2317 | int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
2318 | #endif | ||
2319 | unsigned state = GET_THREAD_STATE(thread); | ||
2320 | |||
2321 | if (state == STATE_FROZEN) | ||
2322 | { | ||
2323 | const unsigned int core = CURRENT_CORE; | ||
2324 | #if NUM_CORES > 1 | ||
2325 | if (thread->core != core) | ||
2326 | { | ||
2327 | core_schedule_wakeup(thread); | ||
2328 | } | ||
2329 | else | ||
2330 | #endif | ||
2331 | { | ||
2332 | add_to_list_l(&cores[core].running, thread); | ||
2333 | } | ||
2334 | |||
2335 | UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); | ||
2336 | return; | ||
2337 | } | ||
2338 | |||
2339 | #if NUM_CORES > 1 | ||
2340 | UNLOCK_THREAD(thread, state); | ||
2341 | set_irq_level(oldlevel); | ||
2342 | #endif | ||
2343 | } | ||
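The freeze/thaw pattern this enables (sketch; init_func and init_stack are illustrative names): create the thread suspended, complete any setup it depends on, then release it.

    struct thread_entry *t =
        create_thread(init_func, init_stack, sizeof(init_stack),
                      CREATE_THREAD_FROZEN, "init"
                      IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));
    /* ... publish data the thread will read ... */
    thread_thaw(t);  /* STATE_FROZEN -> STATE_RUNNING (or queued cross-core) */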
2344 | |||
2345 | /*--------------------------------------------------------------------------- | ||
2346 | * Return the ID of the currently executing thread. | ||
2347 | *--------------------------------------------------------------------------- | ||
2348 | */ | ||
1029 | struct thread_entry * thread_get_current(void) | 2349 | struct thread_entry * thread_get_current(void) |
1030 | { | 2350 | { |
1031 | return cores[CURRENT_CORE].running; | 2351 | return cores[CURRENT_CORE].running; |
1032 | } | 2352 | } |
1033 | 2353 | ||
2354 | #if NUM_CORES > 1 | ||
2355 | /*--------------------------------------------------------------------------- | ||
2356 | * Switch the processor that the currently executing thread runs on. | ||
2357 | *--------------------------------------------------------------------------- | ||
2358 | */ | ||
2359 | unsigned int switch_core(unsigned int new_core) | ||
2360 | { | ||
2361 | const unsigned int core = CURRENT_CORE; | ||
2362 | struct thread_entry *current = cores[core].running; | ||
2363 | struct thread_entry *w; | ||
2364 | int oldlevel; | ||
2365 | |||
2366 | /* Interrupts can access the lists that will be used - disable them */ | ||
2367 | unsigned state = GET_THREAD_STATE(current); | ||
2368 | |||
2369 | if (core == new_core) | ||
2370 | { | ||
2371 | /* No change - just unlock everything and return same core */ | ||
2372 | UNLOCK_THREAD(current, state); | ||
2373 | return core; | ||
2374 | } | ||
2375 | |||
2376 | /* Get us off the running list for the current core */ | ||
2377 | remove_from_list_l(&cores[core].running, current); | ||
2378 | |||
2379 | /* Stash return value (old core) in a safe place */ | ||
2380 | current->retval = core; | ||
2381 | |||
2382 | /* If a timeout hasn't yet been cleaned up it must be removed now or | ||
2383 | * the other core will likely attempt a removal from the wrong list! */ | ||
2384 | if (current->tmo.prev != NULL) | ||
2385 | { | ||
2386 | remove_from_list_tmo(current); | ||
2387 | } | ||
2388 | |||
2389 | /* Change the core number for this thread slot */ | ||
2390 | current->core = new_core; | ||
2391 | |||
2392 | /* Do not use core_schedule_wakeup here since this will result in | ||
2393 | * the thread starting to run on the other core before being finished on | ||
2394 | * this one. Delay the wakeup list unlock to keep the other core stuck | ||
2395 | * until this thread is ready. */ | ||
2396 | oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); | ||
2397 | w = LOCK_LIST(&cores[new_core].waking); | ||
2398 | ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current); | ||
2399 | |||
2400 | /* Make a callback into device-specific code, unlock the wakeup list so | ||
2401 | * that execution may resume on the new core, unlock our slot and finally | ||
2402 | * restore the interrupt level */ | ||
2403 | cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT | | ||
2404 | TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL; | ||
2405 | cores[core].blk_ops.irq_level = oldlevel; | ||
2406 | cores[core].blk_ops.list_p = &cores[new_core].waking; | ||
2407 | #if CONFIG_CORELOCK == CORELOCK_SWAP | ||
2408 | cores[core].blk_ops.state = STATE_RUNNING; | ||
2409 | cores[core].blk_ops.list_v = w; | ||
2410 | #endif | ||
2411 | |||
2412 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
2413 | current->priority_x = HIGHEST_PRIORITY; | ||
2414 | cores[core].highest_priority = LOWEST_PRIORITY; | ||
2415 | #endif | ||
2416 | /* Do the stack switching, cache maintenance and switch_thread call - | ||
2417 | requires native code */ | ||
2418 | switch_thread_core(core, current); | ||
2419 | |||
2420 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
2421 | current->priority_x = LOWEST_PRIORITY; | ||
2422 | cores[current->core].highest_priority = LOWEST_PRIORITY; | ||
2423 | #endif | ||
2424 | |||
2425 | /* Finally return the old core to caller */ | ||
2426 | return current->retval; | ||
2427 | (void)state; | ||
2428 | } | ||
2429 | #endif /* NUM_CORES > 1 */ | ||
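Temporary migration in the style of remove_thread() above (sketch): switch_core() returns the previous core so the caller can hop back when done.

    unsigned int old_core = switch_core(COP);  /* continue on the coprocessor */
    /* ... work that must execute on the other core ... */
    switch_core(old_core);                     /* migrate back home */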
2430 | |||
2431 | /*--------------------------------------------------------------------------- | ||
2432 | * Initialize threading API. This assumes interrupts are not yet enabled. On | ||
2433 | * multicore setups, no core is allowed to proceed until create_thread calls | ||
2434 | * are safe to perform. | ||
2435 | *--------------------------------------------------------------------------- | ||
2436 | */ | ||
1034 | void init_threads(void) | 2437 | void init_threads(void) |
1035 | { | 2438 | { |
1036 | const unsigned int core = CURRENT_CORE; | 2439 | const unsigned int core = CURRENT_CORE; |
@@ -1038,36 +2441,43 @@ void init_threads(void) | |||
1038 | 2441 | ||
1039 | /* CPU will initialize first and then sleep */ | 2442 | /* CPU will initialize first and then sleep */ |
1040 | slot = find_empty_thread_slot(); | 2443 | slot = find_empty_thread_slot(); |
1041 | #if THREAD_EXTRA_CHECKS | 2444 | |
1042 | /* This can fail if, for example, .bss isn't zero'ed out by the loader | 2445 | if (slot >= MAXTHREADS) |
1043 | or threads is in the wrong section. */ | 2446 | { |
1044 | if (slot < 0) { | 2447 | /* WTF? There really must be a slot available at this stage. |
1045 | panicf("uninitialized threads[]"); | 2448 | * This can fail if, for example, .bss isn't zero'ed out by the loader |
2449 | * or threads is in the wrong section. */ | ||
2450 | THREAD_PANICF("init_threads->no slot", NULL); | ||
1046 | } | 2451 | } |
1047 | #endif | ||
1048 | 2452 | ||
1049 | cores[core].sleeping = NULL; | ||
1050 | cores[core].running = NULL; | 2453 | cores[core].running = NULL; |
1051 | cores[core].waking = NULL; | 2454 | cores[core].timeout = NULL; |
1052 | cores[core].wakeup_list = &cores[core].running; | 2455 | thread_queue_init(&cores[core].waking); |
1053 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 2456 | cores[core].next_tmo_check = current_tick; /* Something not in the past */ |
1054 | cores[core].switch_to_irq_level = STAY_IRQ_LEVEL; | 2457 | #if NUM_CORES > 1 |
2458 | cores[core].blk_ops.flags = 0; | ||
2459 | #else | ||
2460 | cores[core].irq_level = STAY_IRQ_LEVEL; | ||
1055 | #endif | 2461 | #endif |
1056 | threads[slot].name = main_thread_name; | 2462 | threads[slot].name = main_thread_name; |
1057 | threads[slot].statearg = 0; | 2463 | UNLOCK_THREAD_SET_STATE(&threads[slot], STATE_RUNNING); /* No sync worries yet */ |
1058 | threads[slot].context.start = 0; /* core's main thread already running */ | 2464 | threads[slot].context.start = NULL; /* core's main thread already running */ |
2465 | threads[slot].tmo.prev = NULL; | ||
2466 | threads[slot].queue = NULL; | ||
2467 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
2468 | threads[slot].boosted = 0; | ||
2469 | #endif | ||
1059 | #if NUM_CORES > 1 | 2470 | #if NUM_CORES > 1 |
1060 | threads[slot].core = core; | 2471 | threads[slot].core = core; |
1061 | #endif | 2472 | #endif |
1062 | #ifdef HAVE_PRIORITY_SCHEDULING | 2473 | #ifdef HAVE_PRIORITY_SCHEDULING |
1063 | threads[slot].priority = PRIORITY_USER_INTERFACE; | 2474 | threads[slot].priority = PRIORITY_USER_INTERFACE; |
1064 | threads[slot].priority_x = 0; | 2475 | threads[slot].priority_x = LOWEST_PRIORITY; |
1065 | cores[core].highest_priority = 100; | 2476 | cores[core].highest_priority = LOWEST_PRIORITY; |
1066 | #endif | 2477 | #endif |
1067 | add_to_list(&cores[core].running, &threads[slot]); | 2478 | |
1068 | 2479 | add_to_list_l(&cores[core].running, &threads[slot]); | |
1069 | /* In multiple core setups, each core has a different stack. There is | 2480 | |
1070 | * probably a much better way to do this. */ | ||
1071 | if (core == CPU) | 2481 | if (core == CPU) |
1072 | { | 2482 | { |
1073 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 2483 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
@@ -1076,22 +2486,19 @@ void init_threads(void) | |||
1076 | threads[slot].stack = stackbegin; | 2486 | threads[slot].stack = stackbegin; |
1077 | threads[slot].stack_size = (int)stackend - (int)stackbegin; | 2487 | threads[slot].stack_size = (int)stackend - (int)stackbegin; |
1078 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ | 2488 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ |
1079 | /* Mark CPU initialized */ | ||
1080 | cores[CPU].kernel_running = true; | ||
1081 | /* Do _not_ wait for the COP to init in the bootloader because it doesn't */ | ||
1082 | /* TODO: HAL interface for this */ | 2489 | /* TODO: HAL interface for this */ |
1083 | /* Wake up coprocessor and let it initialize kernel and threads */ | 2490 | /* Wake up coprocessor and let it initialize kernel and threads */ |
1084 | COP_CTL = PROC_WAKE; | 2491 | COP_CTL = PROC_WAKE; |
1085 | /* Sleep until finished */ | 2492 | /* Sleep until finished */ |
1086 | CPU_CTL = PROC_SLEEP; | 2493 | CPU_CTL = PROC_SLEEP; |
1087 | } | 2494 | } |
1088 | else | 2495 | else |
1089 | { | 2496 | { |
1090 | /* Initial stack is the COP idle stack */ | 2497 | /* Initial stack is the COP idle stack */ |
1091 | threads[slot].stack = cop_idlestackbegin; | 2498 | threads[slot].stack = cop_idlestackbegin; |
1092 | threads[slot].stack_size = IDLE_STACK_SIZE; | 2499 | threads[slot].stack_size = IDLE_STACK_SIZE; |
1093 | /* Mark COP initialized */ | 2500 | /* Mark COP initialized */ |
1094 | cores[COP].kernel_running = true; | 2501 | cores[COP].blk_ops.flags = 0; |
1095 | /* Get COP safely primed inside switch_thread where it will remain | 2502 | /* Get COP safely primed inside switch_thread where it will remain |
1096 | * until a thread actually exists on it */ | 2503 | * until a thread actually exists on it */ |
1097 | CPU_CTL = PROC_WAKE; | 2504 | CPU_CTL = PROC_WAKE; |
@@ -1100,19 +2507,28 @@ void init_threads(void) | |||
1100 | } | 2507 | } |
1101 | } | 2508 | } |
1102 | 2509 | ||
2510 | /*--------------------------------------------------------------------------- | ||
2511 | * Returns the maximum percentage of stack a thread ever used while running. | ||
2512 | * NOTE: Some large buffer allocations that don't use enough of the buffer to | ||
2513 | * overwrite stackptr[0] will not be seen. | ||
2514 | *--------------------------------------------------------------------------- | ||
2515 | */ | ||
1103 | int thread_stack_usage(const struct thread_entry *thread) | 2516 | int thread_stack_usage(const struct thread_entry *thread) |
1104 | { | 2517 | { |
1105 | unsigned int i; | ||
1106 | unsigned int *stackptr = thread->stack; | 2518 | unsigned int *stackptr = thread->stack; |
2519 | int stack_words = thread->stack_size / sizeof (int); | ||
2520 | int i, usage = 0; | ||
1107 | 2521 | ||
1108 | for (i = 0;i < thread->stack_size/sizeof(int);i++) | 2522 | for (i = 0; i < stack_words; i++) |
1109 | { | 2523 | { |
1110 | if (stackptr[i] != DEADBEEF) | 2524 | if (stackptr[i] != DEADBEEF) |
2525 | { | ||
2526 | usage = ((stack_words - i) * 100) / stack_words; | ||
1111 | break; | 2527 | break; |
2528 | } | ||
1112 | } | 2529 | } |
1113 | 2530 | ||
1114 | return ((thread->stack_size - i * sizeof(int)) * 100) / | 2531 | return usage; |
1115 | thread->stack_size; | ||
1116 | } | 2532 | } |
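A worked instance of the calculation above: with a 256-word stack whose first non-DEADBEEF word sits at i == 192, usage = ((256 - 192) * 100) / 256 = 25 percent. A quick probe of the running thread:

    int pct = thread_stack_usage(thread_get_current());
    /* pct is the high-water mark as a percentage; 0 means the
     * DEADBEEF fill pattern was never overwritten */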
1117 | 2533 | ||
1118 | #if NUM_CORES > 1 | 2534 | #if NUM_CORES > 1 |
@@ -1139,9 +2555,14 @@ int idle_stack_usage(unsigned int core) | |||
1139 | } | 2555 | } |
1140 | #endif | 2556 | #endif |
1141 | 2557 | ||
1142 | int thread_get_status(const struct thread_entry *thread) | 2558 | /*--------------------------------------------------------------------------- |
2559 | * Returns the current thread status. This is a snapshot for debugging and | ||
2560 | * does not do any slot synchronization so it could return STATE_BUSY. | ||
2561 | *--------------------------------------------------------------------------- | ||
2562 | */ | ||
2563 | unsigned thread_get_status(const struct thread_entry *thread) | ||
1143 | { | 2564 | { |
1144 | return GET_STATE(thread->statearg); | 2565 | return thread->state; |
1145 | } | 2566 | } |
1146 | 2567 | ||
1147 | /*--------------------------------------------------------------------------- | 2568 | /*--------------------------------------------------------------------------- |
@@ -1163,7 +2584,7 @@ void thread_get_name(char *buffer, int size, | |||
1163 | /* Display thread name if one or ID if none */ | 2584 | /* Display thread name if one or ID if none */ |
1164 | const char *name = thread->name; | 2585 | const char *name = thread->name; |
1165 | const char *fmt = "%s"; | 2586 | const char *fmt = "%s"; |
1166 | if (name == NULL || *name == '\0') | 2587 | if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0') |
1167 | { | 2588 | { |
1168 | name = (const char *)thread; | 2589 | name = (const char *)thread; |
1169 | fmt = "%08lX"; | 2590 | fmt = "%08lX"; |
diff --git a/firmware/usb.c b/firmware/usb.c index af09aecff9..f79af98518 100644 --- a/firmware/usb.c +++ b/firmware/usb.c | |||
@@ -66,7 +66,7 @@ static int usb_mmc_countdown = 0; | |||
66 | static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)]; | 66 | static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)]; |
67 | static const char usb_thread_name[] = "usb"; | 67 | static const char usb_thread_name[] = "usb"; |
68 | #endif | 68 | #endif |
69 | static struct event_queue usb_queue; | 69 | static struct event_queue usb_queue NOCACHEBSS_ATTR; |
70 | static int last_usb_status; | 70 | static int last_usb_status; |
71 | static bool usb_monitor_enabled; | 71 | static bool usb_monitor_enabled; |
72 | 72 | ||
@@ -119,7 +119,7 @@ static void usb_thread(void) | |||
119 | { | 119 | { |
120 | int num_acks_to_expect = -1; | 120 | int num_acks_to_expect = -1; |
121 | bool waiting_for_ack; | 121 | bool waiting_for_ack; |
122 | struct event ev; | 122 | struct queue_event ev; |
123 | 123 | ||
124 | waiting_for_ack = false; | 124 | waiting_for_ack = false; |
125 | 125 | ||
@@ -307,9 +307,9 @@ void usb_init(void) | |||
307 | #ifndef BOOTLOADER | 307 | #ifndef BOOTLOADER |
308 | queue_init(&usb_queue, true); | 308 | queue_init(&usb_queue, true); |
309 | 309 | ||
310 | create_thread(usb_thread, usb_stack, sizeof(usb_stack), | 310 | create_thread(usb_thread, usb_stack, sizeof(usb_stack), 0, |
311 | usb_thread_name IF_PRIO(, PRIORITY_SYSTEM) | 311 | usb_thread_name IF_PRIO(, PRIORITY_SYSTEM) |
312 | IF_COP(, CPU, false)); | 312 | IF_COP(, CPU)); |
313 | 313 | ||
314 | tick_add_task(usb_tick); | 314 | tick_add_task(usb_tick); |
315 | #endif | 315 | #endif |
@@ -318,7 +318,7 @@ void usb_init(void) | |||
318 | 318 | ||
319 | void usb_wait_for_disconnect(struct event_queue *q) | 319 | void usb_wait_for_disconnect(struct event_queue *q) |
320 | { | 320 | { |
321 | struct event ev; | 321 | struct queue_event ev; |
322 | 322 | ||
323 | /* Don't return until we get SYS_USB_DISCONNECTED */ | 323 | /* Don't return until we get SYS_USB_DISCONNECTED */ |
324 | while(1) | 324 | while(1) |
@@ -334,7 +334,7 @@ void usb_wait_for_disconnect(struct event_queue *q) | |||
334 | 334 | ||
335 | int usb_wait_for_disconnect_w_tmo(struct event_queue *q, int ticks) | 335 | int usb_wait_for_disconnect_w_tmo(struct event_queue *q, int ticks) |
336 | { | 336 | { |
337 | struct event ev; | 337 | struct queue_event ev; |
338 | 338 | ||
339 | /* Don't return until we get SYS_USB_DISCONNECTED or SYS_TIMEOUT */ | 339 | /* Don't return until we get SYS_USB_DISCONNECTED or SYS_TIMEOUT */ |
340 | while(1) | 340 | while(1) |