author:    Michael Sevakis <jethead71@rockbox.org>    2010-06-10 17:31:45 +0000
committer: Michael Sevakis <jethead71@rockbox.org>    2010-06-10 17:31:45 +0000
commit:    05ca8978c4fe965a619f016d79aaf6955767abf9 (patch)
tree:      606a19c322864fa823fda7c0a6daf998f76417e3
parent:    863891ce9aef50fde13cf3df897aca144a2c570a (diff)
Clean unused stuff out of thread.h and config.h and reorganize thread-pp.c to simplify the preprocessor blocks.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26743 a1c6a512-1295-4272-9138-f99709370657
 firmware/export/config.h        |  23
 firmware/export/thread.h        | 126
 firmware/target/arm/thread-pp.c | 526
 3 files changed, 284 insertions(+), 391 deletions(-)
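The shape of the cleanup: the three-way CONFIG_CORELOCK setting (CORELOCK_NONE / SW_CORELOCK) collapses into a single feature macro, HAVE_CORELOCK_OBJECT, defined only for multi-core PP builds. A minimal sketch of what dependent code looks like before and after (illustrative; audio_cl is a made-up name, not an identifier from the tree):

    /* Before: every consumer compared a value-style option */
    #if CONFIG_CORELOCK == SW_CORELOCK
    static struct corelock audio_cl;
    #endif

    /* After: a plain presence test; single-core builds get empty
     * corelock_*() macros, so most callers need no conditionals at all */
    #ifdef HAVE_CORELOCK_OBJECT
    static struct corelock audio_cl;
    #endif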
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 2039aa55f9..5947ca171e 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -793,11 +793,6 @@ Lyre prototype 1 */
 #define FORCE_SINGLE_CORE
 #endif
 
-/* Core locking types - specifies type of atomic operation */
-#define CORELOCK_NONE 0
-#define SW_CORELOCK  1 /* Mutual exclusion provided by a software algorithm
-                          and not a special semaphore instruction */
-
 #if defined(CPU_PP)
 #define IDLE_STACK_SIZE  0x80
 #define IDLE_STACK_WORDS 0x20
@@ -811,6 +806,7 @@ Lyre prototype 1 */
 #if !defined(FORCE_SINGLE_CORE)
 
 #define NUM_CORES 2
+#define HAVE_CORELOCK_OBJECT
 #define CURRENT_CORE current_core()
 /* Attributes for core-shared data in DRAM where IRAM is better used for other
  * purposes. */
@@ -821,9 +817,7 @@ Lyre prototype 1 */
 #define IF_COP_VOID(...) __VA_ARGS__
 #define IF_COP_CORE(core) core
 
-#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
-
-#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
+#endif /* !defined(FORCE_SINGLE_CORE) */
 
 #endif /* CPU_PP */
 
@@ -832,18 +826,6 @@ Lyre prototype 1 */
 #define NOCACHEDATA_ATTR __attribute__((section(".ncdata"),nocommon))
 #endif
 
-#ifndef CONFIG_CORELOCK
-#define CONFIG_CORELOCK CORELOCK_NONE
-#endif
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define IF_SWCL(...) __VA_ARGS__
-#define IFN_SWCL(...)
-#else
-#define IF_SWCL(...)
-#define IFN_SWCL(...) __VA_ARGS__
-#endif /* CONFIG_CORELOCK == */
-
 #ifndef NUM_CORES
 /* Default to single core */
 #define NUM_CORES 1
@@ -855,7 +837,6 @@ Lyre prototype 1 */
 #define NOCACHEBSS_ATTR
 #define NOCACHEDATA_ATTR
 #endif
-#define CONFIG_CORELOCK CORELOCK_NONE
 
 #define IF_COP(...)
 #define IF_COP_VOID(...) void
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index a26b5962e2..8912283343 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -109,6 +109,23 @@ struct regs
     uint32_t lr;    /* 36 - r14 (lr) */
     uint32_t start; /* 40 - Thread start address, or NULL when started */
 };
+
+#ifdef CPU_PP
+#ifdef HAVE_CORELOCK_OBJECT
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+    volatile unsigned char myl[NUM_CORES];
+    volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#endif /* HAVE_CORELOCK_OBJECT */
+#endif /* CPU_PP */
 #elif defined(CPU_MIPS)
 struct regs
 {
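For context, the corelock API declared above is used like an ordinary spinlock around cross-core shared state. A hedged usage sketch (shared_cl and shared_count are hypothetical names, not identifiers from the tree):

    static struct corelock shared_cl IBSS_ATTR; /* hypothetical lock */
    static int shared_count;                    /* hypothetical shared data */

    void shared_count_init(void)
    {
        corelock_init(&shared_cl);
    }

    void shared_count_add(int n)
    {
        corelock_lock(&shared_cl);   /* spins via Peterson's algorithm */
        shared_count += n;
        corelock_unlock(&shared_cl);
    }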
@@ -162,26 +179,13 @@ struct thread_list
     struct thread_entry *next; /* Next thread in a list */
 };
 
-/* Small objects for core-wise mutual exclusion */
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* No reliable atomic instruction available - use Peterson's algorithm */
-struct corelock
-{
-    volatile unsigned char myl[NUM_CORES];
-    volatile unsigned char turn;
-} __attribute__((packed));
-
-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
-#else
+#ifndef HAVE_CORELOCK_OBJECT
 /* No atomic corelock op needed or just none defined */
 #define corelock_init(cl)
 #define corelock_lock(cl)
 #define corelock_try_lock(cl)
 #define corelock_unlock(cl)
-#endif /* core locking selection */
+#endif /* HAVE_CORELOCK_OBJECT */
 
 #ifdef HAVE_PRIORITY_SCHEDULING
 struct blocker
@@ -341,98 +345,6 @@ struct core_entry
 #define IFN_PRIO(...) __VA_ARGS__
 #endif
 
-/* Macros generate better code than an inline function in this case */
-#if defined (CPU_ARM)
-/* atomic */
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define test_and_set(a, v, cl) \
-    xchg8((a), (v), (cl))
-/* atomic */
-#define xchg8(a, v, cl) \
-({ uint32_t o;            \
-   corelock_lock(cl);     \
-   o = *(uint8_t *)(a);   \
-   *(uint8_t *)(a) = (v); \
-   corelock_unlock(cl);   \
-   o; })
-#define xchg32(a, v, cl) \
-({ uint32_t o;             \
-   corelock_lock(cl);      \
-   o = *(uint32_t *)(a);   \
-   *(uint32_t *)(a) = (v); \
-   corelock_unlock(cl);    \
-   o; })
-#define xchgptr(a, v, cl) \
-({ typeof (*(a)) o;     \
-   corelock_lock(cl);   \
-   o = *(a);            \
-   *(a) = (v);          \
-   corelock_unlock(cl); \
-   o; })
-#endif /* locking selection */
-#elif defined (CPU_COLDFIRE)
-/* atomic */
-/* one branch will be optimized away if v is a constant expression */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = 0;                \
-   if (v) {                       \
-       asm volatile (             \
-           "bset.b #0, (%0)"      \
-           : : "a"((uint8_t*)(a)) \
-           : "cc");               \
-   } else {                       \
-       asm volatile (             \
-           "bclr.b #0, (%0)"      \
-           : : "a"((uint8_t*)(a)) \
-           : "cc");               \
-   }                              \
-   asm volatile ("sne.b %0"       \
-                 : "+d"(o));      \
-   o; })
-#elif CONFIG_CPU == SH7034
-/* atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o;                                          \
-   asm volatile (                                       \
-       "tas.b @%2 \n"                                   \
-       "mov #-1, %0 \n"                                 \
-       "negc %0, %0 \n"                                 \
-       : "=r"(o)                                        \
-       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
-         "r"((uint8_t *)(a)));                          \
-   o; })
-#endif /* CONFIG_CPU == */
-
-/* defaults for no asm version */
-#ifndef test_and_set
-/* not atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* test_and_set */
-#ifndef xchg8
-/* not atomic */
-#define xchg8(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* xchg8 */
-#ifndef xchg32
-/* not atomic */
-#define xchg32(a, v, ...) \
-({ uint32_t o = *(uint32_t *)(a); \
-   *(uint32_t *)(a) = (v);        \
-   o; })
-#endif /* xchg32 */
-#ifndef xchgptr
-/* not atomic */
-#define xchgptr(a, v, ...) \
-({ typeof (*(a)) o = *(a); \
-   *(a) = (v);             \
-   o; })
-#endif /* xchgptr */
-
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));
 
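The removed test_and_set/xchg8/xchg32/xchgptr helpers had no remaining callers. Should equivalent functionality ever be needed again, the idiom the macros wrapped is simply a swap performed under a corelock; a sketch of that idiom as a plain function (illustrative only, assuming a corelock-capable build):

    /* Equivalent of the removed xchg8() macro. Atomicity comes from the
     * corelock, not from a special instruction (SWP(B) is broken on PP). */
    static inline unsigned int xchg8_under_lock(volatile uint8_t *a,
                                                unsigned int v,
                                                struct corelock *cl)
    {
        unsigned int o;
        corelock_lock(cl);   /* exclude the other core */
        o = *a;              /* read old value */
        *a = (uint8_t)v;     /* store new value */
        corelock_unlock(cl);
        return o;
    }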
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
index 8dfbd64080..335f1f3e0a 100644
--- a/firmware/target/arm/thread-pp.c
+++ b/firmware/target/arm/thread-pp.c
@@ -26,7 +26,21 @@
 #define IF_NO_SKIP_YIELD(...) __VA_ARGS__
 #endif
 
-#if NUM_CORES > 1
+#if NUM_CORES == 1
+/* Single-core variants for FORCE_SINGLE_CORE */
+static inline void core_sleep(void)
+{
+    sleep_core(CURRENT_CORE);
+    enable_irq();
+}
+
+/* Shared single-core build debugging version */
+void core_wake(void)
+{
+    /* No wakey - core already wakey (because this is it) */
+}
+#else /* NUM_CORES > 1 */
+/** Model-generic PP dual-core code **/
 extern uintptr_t cpu_idlestackbegin[];
 extern uintptr_t cpu_idlestackend[];
 extern uintptr_t cop_idlestackbegin[];
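Note how the reorganized file now fronts the trivial single-core stubs and keeps everything else under one NUM_CORES > 1 block. On single-core builds the stubs still match thread.h because the IF_COP_VOID() macro from config.h swallows the argument list; a sketch of the expansion (derived from the macros shown in the config.h hunks above):

    /* One prototype in thread.h serves both configurations: */
    void core_wake(IF_COP_VOID(unsigned int core));

    /* NUM_CORES == 1: IF_COP_VOID(...) -> void
     *                 => void core_wake(void);              (stub above)
     * NUM_CORES  > 1: IF_COP_VOID(x)   -> x
     *                 => void core_wake(unsigned int core); */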
@@ -37,23 +51,7 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
     [COP] = cop_idlestackbegin
 };
 
-#if CONFIG_CPU == PP5002
-/* Bytes to emulate the PP502x mailbox bits */
-struct core_semaphores
-{
-    volatile uint8_t intend_wake;  /* 00h */
-    volatile uint8_t stay_awake;   /* 01h */
-    volatile uint8_t intend_sleep; /* 02h */
-    volatile uint8_t unused;       /* 03h */
-};
-
-static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
-#endif /* CONFIG_CPU == PP5002 */
-
-#endif /* NUM_CORES */
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Software core locks using Peterson's mutual exclusion algorithm */
+/* Core locks using Peterson's mutual exclusion algorithm */
 
 /*---------------------------------------------------------------------------
  * Initialize the corelock structure.
@@ -69,8 +67,7 @@ void corelock_init(struct corelock *cl)
  * Wait for the corelock to become free and acquire it when it does.
  *---------------------------------------------------------------------------
  */
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
+void __attribute__((naked)) corelock_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -96,8 +93,7 @@ void corelock_lock(struct corelock *cl)
  * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
  *---------------------------------------------------------------------------
  */
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
+int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -125,8 +121,7 @@ int corelock_try_lock(struct corelock *cl)
  * Release ownership of the corelock
  *---------------------------------------------------------------------------
  */
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
+void __attribute__((naked)) corelock_unlock(struct corelock *cl)
 {
     asm volatile (
         "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
@@ -138,11 +133,9 @@ void corelock_unlock(struct corelock *cl)
     );
     (void)cl;
 }
+
 #else /* C versions for reference */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and acquire it when it does.
- *---------------------------------------------------------------------------
- */
+
 void corelock_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
@@ -158,10 +151,6 @@ void corelock_lock(struct corelock *cl)
     }
 }
 
-/*---------------------------------------------------------------------------
- * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
 int corelock_try_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
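Both the asm and C corelock implementations are Peterson's classic two-party mutual exclusion. A textbook rendering for two cores (a sketch only; the real Rockbox encoding stores complementary processor-ID bitmasks, 0x55/0xaa, so the asm can exploit them, and portable C would additionally need memory barriers where these targets rely on strongly-ordered accesses):

    /* 'me' is this core's index, 0 or 1 */
    static void peterson_lock(struct corelock *cl, unsigned int me)
    {
        unsigned int other = 1 - me;
        cl->myl[me] = 1;    /* declare interest */
        cl->turn = other;   /* offer the other core the first turn */
        /* Spin while the other core is interested and it is its turn */
        while (cl->myl[other] != 0 && cl->turn == other);
    }

    static void peterson_unlock(struct corelock *cl, unsigned int me)
    {
        cl->myl[me] = 0;    /* withdraw interest; any waiter proceeds */
    }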
@@ -179,85 +168,141 @@ int corelock_try_lock(struct corelock *cl)
     return 0;
 }
 
-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
 void corelock_unlock(struct corelock *cl)
 {
     cl->myl[CURRENT_CORE] = 0;
 }
 #endif /* ASM / C selection */
 
-#endif /* CONFIG_CORELOCK == SW_CORELOCK */
-
 /*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated and if
- * no other core requested a wakeup for it to perform a task.
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
  *---------------------------------------------------------------------------
  */
-#ifdef CPU_PP502x
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+static void INIT_ATTR core_thread_init(unsigned int core)
 {
-    sleep_core(CURRENT_CORE);
-    enable_irq();
+    if (core == CPU)
+    {
+        /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+        MBX_MSG_CLR = 0x3f;
+#endif
+        wake_core(COP);
+        /* Sleep until COP has finished */
+        sleep_core(CPU);
+    }
+    else
+    {
+        /* Wake the CPU and return */
+        wake_core(CPU);
+    }
 }
-#else
-static inline void core_sleep(unsigned int core)
+
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU since the
+ * stack used when idling is the stack of the last thread to run. This stack
+ * may not reside in the core firmware in which case the core will continue
+ * to use a stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
 {
-#if 1
     asm volatile (
-        "mov r0, #4                   \n" /* r0 = 0x4 << core */
-        "mov r0, r0, lsl %[c]         \n"
-        "str r0, [%[mbx], #4]         \n" /* signal intent to sleep */
-        "ldr r1, [%[mbx], #0]         \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
-        "tst r1, r0, lsl #2           \n"
-        "moveq r1, #0x80000000        \n" /* Then sleep */
-        "streq r1, [%[ctl], %[c], lsl #2] \n"
-        "moveq r1, #0                 \n" /* Clear control reg */
-        "streq r1, [%[ctl], %[c], lsl #2] \n"
-        "orr r1, r0, r0, lsl #2       \n" /* Signal intent to wake - clear wake flag */
-        "str r1, [%[mbx], #8]         \n"
-    "1:                               \n" /* Wait for wake procedure to finish */
-        "ldr r1, [%[mbx], #0]         \n"
-        "tst r1, r0, lsr #2           \n"
-        "bne 1b                       \n"
-        :
-        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
-        : "r0", "r1");
-#else /* C version for reference */
-    /* Signal intent to sleep */
-    MBX_MSG_SET = 0x4 << core;
+        "str sp, [%0] \n" /* save original stack pointer on idle stack */
+        "mov sp, %0   \n" /* switch stacks */
+        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
+    (void)core;
+}
 
-    /* Something waking or other processor intends to wake us? */
-    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
-    {
-        sleep_core(core);
-        wake_core(core);
-    }
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place before changing the processor and after
+ * having entered switch_thread since switch_thread may not do a normal return
+ * because the stack being used for anything the compiler saved will not belong
+ * to the thread's destination core and it may have been recycled for other
+ * purposes by the time a normal context load has taken place. switch_thread
+ * will also clobber anything stashed in the thread's context or stored in the
+ * nonvolatile registers if it is saved there before the call since the
+ * compiler's order of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+    /* Flush our data to ram */
+    cpucache_flush();
+    /* Stash thread in r4 slot */
+    thread->context.r[0] = (uint32_t)thread;
+    /* Stash restart address in r5 slot */
+    thread->context.r[1] = thread->context.start;
+    /* Save sp in context.sp while still running on old core */
+    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
+}
 
-    /* Signal wake - clear wake flag */
-    MBX_MSG_CLR = 0x14 << core;
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill-behavior regarding
+ * naked functions written in C where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */
 
-    /* Wait for other processor to finish wake procedure */
-    while (MBX_MSG_STAT & (0x1 << core));
-#endif /* ASM/C selection */
-    enable_irq();
-}
-#endif /* NUM_CORES */
-#elif CONFIG_CPU == PP5002
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void __attribute__((naked))
+switch_thread_core(unsigned int core, struct thread_entry *thread)
 {
-    sleep_core(CURRENT_CORE);
-    enable_irq();
+    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
+     * Stack access also isn't permitted until restoring the original stack and
+     * context. */
+    asm volatile (
+        "stmfd sp!, { r4-r11, lr }  \n" /* Stack all non-volatile context on current core */
+        "ldr   r2, =idle_stacks     \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+        "ldr   r2, [r2, r0, lsl #2] \n"
+        "add   r2, r2, %0*4         \n"
+        "stmfd r2!, { sp }          \n" /* save original stack pointer on idle stack */
+        "mov   sp, r2               \n" /* switch stacks */
+        "adr   r2, 1f               \n" /* r2 = new core restart address */
+        "str   r2, [r1, #40]        \n" /* thread->context.start = r2 */
+        "ldr   pc, =switch_thread   \n" /* r0 = thread after call - see load_context */
+    "1:                             \n"
+        "ldr   sp, [r0, #32]        \n" /* Reload original sp from context structure */
+        "mov   r1, #0               \n" /* Clear start address */
+        "str   r1, [r0, #40]        \n"
+        "ldr   r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
+        "mov   lr, pc               \n"
+        "bx    r0                   \n"
+        "ldmfd sp!, { r4-r11, pc }  \n" /* Restore non-volatile context to new core and return */
+        : : "i"(IDLE_STACK_WORDS)
+    );
+    (void)core; (void)thread;
 }
-#else
-/* PP5002 has no mailboxes - emulate using bytes */
+
+/** PP-model-specific dual-core code **/
+
+#if CONFIG_CPU == PP5002
+/* PP5002 has no mailboxes - Bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+    volatile uint8_t intend_wake;  /* 00h */
+    volatile uint8_t stay_awake;   /* 01h */
+    volatile uint8_t intend_sleep; /* 02h */
+    volatile uint8_t unused;       /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+
+#if 1 /* Select ASM */
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
 static inline void core_sleep(unsigned int core)
 {
-#if 1
     asm volatile (
         "mov r0, #1                \n" /* Signal intent to sleep */
         "strb r0, [%[sem], #2]     \n"
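Taken together, core_switch_blk_op() and switch_thread_core() implement cross-core thread migration: the thread parks its context on the departing core and resumes at the "1:" label on the destination. A simplified C-level view of the calling sequence (illustrative only; the real entry point is the generic switch_core(), and the final step must stay in asm because compiler-generated stack traffic cannot be trusted across the stack switch):

    /* Conceptual flow only - not the literal implementation */
    static void migrate_thread_sketch(unsigned int core,
                                      struct thread_entry *self)
    {
        /* Flush cache; stash thread pointer, restart address and idle
         * sp in the context slots the asm expects (r4/r5 slots, sp) */
        core_switch_blk_op(core, self);

        /* Suicide here; execution resumes at label 1: in the asm, on
         * the destination core, with sp reloaded from self->context.sp
         * and the new core's cache invalidated */
        switch_thread_core(core, self);
    }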
@@ -288,7 +333,50 @@ static inline void core_sleep(unsigned int core)
           [ctl]"r"(&CPU_CTL)
         : "r0"
     );
+    enable_irq();
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(unsigned int othercore)
+{
+    /* avoid r0 since that contains othercore */
+    asm volatile (
+        "mrs r3, cpsr            \n" /* Disable IRQ */
+        "orr r1, r3, #0x80       \n"
+        "msr cpsr_c, r1          \n"
+        "mov r1, #1              \n" /* Signal intent to wake other core */
+        "orr r1, r1, r1, lsl #8  \n" /* and set stay_awake */
+        "strh r1, [%[sem], #0]   \n"
+        "mov r2, #0x8000         \n"
+    "1:                          \n" /* If it intends to sleep, let it first */
+        "ldrb r1, [%[sem], #2]   \n" /* intend_sleep != 0 ? */
+        "cmp r1, #1              \n"
+        "ldr r1, [%[st]]         \n" /* && not sleeping ? */
+        "tsteq r1, r2, lsr %[oc] \n"
+        "beq 1b                  \n" /* Wait for sleep or wake */
+        "tst r1, r2, lsr %[oc]   \n"
+        "ldrne r2, =0xcf004054   \n" /* If sleeping, wake it */
+        "movne r1, #0xce         \n"
+        "strne r1, [r2, %[oc], lsl #2] \n"
+        "mov r1, #0              \n" /* Done with wake procedure */
+        "strb r1, [%[sem], #0]   \n"
+        "msr cpsr_c, r3          \n" /* Restore IRQ */
+        :
+        : [sem]"r"(&core_semaphores[othercore]),
+          [st]"r"(&PROC_STAT),
+          [oc]"r"(othercore)
+        : "r1", "r2", "r3"
+    );
+}
+
 #else /* C version for reference */
+
+static inline void core_sleep(unsigned int core)
+{
     /* Signal intent to sleep */
     core_semaphores[core].intend_sleep = 1;
 
@@ -306,27 +394,71 @@ static inline void core_sleep(unsigned int core)
     while (core_semaphores[core].intend_wake != 0);
 
     /* Enable IRQ */
-#endif /* ASM/C selection */
     enable_irq();
 }
-#endif /* NUM_CORES */
-#endif /* PP CPU type */
 
+void core_wake(unsigned int othercore)
+{
+    /* Disable interrupts - avoid reentrancy from the tick */
+    int oldlevel = disable_irq_save();
+
+    /* Signal intent to wake other processor - set stay awake */
+    core_semaphores[othercore].intend_wake = 1;
+    core_semaphores[othercore].stay_awake = 1;
+
+    /* If it intends to sleep, wait until it does or aborts */
+    while (core_semaphores[othercore].intend_sleep != 0 &&
+           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+
+    /* If sleeping, wake it up */
+    if (PROC_STAT & PROC_SLEEPING(othercore))
+        wake_core(othercore);
+
+    /* Done with wake procedure */
+    core_semaphores[othercore].intend_wake = 0;
+    restore_irq(oldlevel);
+}
+#endif /* ASM/C selection */
+
+#elif defined (CPU_PP502x)
+
+#if 1 /* Select ASM */
 /*---------------------------------------------------------------------------
- * Wake another processor core that is sleeping or prevent it from doing so
- * if it was already destined. FIQ, IRQ should be disabled before calling.
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
  *---------------------------------------------------------------------------
  */
-#if NUM_CORES == 1
-/* Shared single-core build debugging version */
-void core_wake(void)
+static inline void core_sleep(unsigned int core)
 {
-    /* No wakey - core already wakey */
+    asm volatile (
+        "mov r0, #4                   \n" /* r0 = 0x4 << core */
+        "mov r0, r0, lsl %[c]         \n"
+        "str r0, [%[mbx], #4]         \n" /* signal intent to sleep */
+        "ldr r1, [%[mbx], #0]         \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+        "tst r1, r0, lsl #2           \n"
+        "moveq r1, #0x80000000        \n" /* Then sleep */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "moveq r1, #0                 \n" /* Clear control reg */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "orr r1, r0, r0, lsl #2       \n" /* Signal intent to wake - clear wake flag */
+        "str r1, [%[mbx], #8]         \n"
+    "1:                               \n" /* Wait for wake procedure to finish */
+        "ldr r1, [%[mbx], #0]         \n"
+        "tst r1, r0, lsr #2           \n"
+        "bne 1b                       \n"
+        :
+        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
+        : "r0", "r1");
+    enable_irq();
 }
-#elif defined (CPU_PP502x)
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
 void core_wake(unsigned int othercore)
 {
-#if 1
     /* avoid r0 since that contains othercore */
     asm volatile (
         "mrs r3, cpsr            \n" /* Disable IRQ */
@@ -352,190 +484,58 @@ void core_wake(unsigned int othercore)
         : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
           [oc]"r"(othercore)
         : "r1", "r2", "r3");
+}
+
 #else /* C version for reference */
-    /* Disable interrupts - avoid reentrancy from the tick */
-    int oldlevel = disable_irq_save();
 
-    /* Signal intent to wake other processor - set stay awake */
-    MBX_MSG_SET = 0x11 << othercore;
+static inline void core_sleep(unsigned int core)
+{
+    /* Signal intent to sleep */
+    MBX_MSG_SET = 0x4 << core;
 
-    /* If it intends to sleep, wait until it does or aborts */
-    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
-           (PROC_CTL(othercore) & PROC_SLEEP) == 0);
+    /* Something waking or other processor intends to wake us? */
+    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
+    {
+        sleep_core(core);
+        wake_core(core);
+    }
 
-    /* If sleeping, wake it up */
-    if (PROC_CTL(othercore) & PROC_SLEEP)
-        PROC_CTL(othercore) = 0;
+    /* Signal wake - clear wake flag */
+    MBX_MSG_CLR = 0x14 << core;
 
-    /* Done with wake procedure */
-    MBX_MSG_CLR = 0x1 << othercore;
-    restore_irq(oldlevel);
-#endif /* ASM/C selection */
+    /* Wait for other processor to finish wake procedure */
+    while (MBX_MSG_STAT & (0x1 << core));
+    enable_irq();
 }
-#elif CONFIG_CPU == PP5002
-/* PP5002 has no mailboxes - emulate using bytes */
+
 void core_wake(unsigned int othercore)
 {
-#if 1
-    /* avoid r0 since that contains othercore */
-    asm volatile (
-        "mrs r3, cpsr            \n" /* Disable IRQ */
-        "orr r1, r3, #0x80       \n"
-        "msr cpsr_c, r1          \n"
-        "mov r1, #1              \n" /* Signal intent to wake other core */
-        "orr r1, r1, r1, lsl #8  \n" /* and set stay_awake */
-        "strh r1, [%[sem], #0]   \n"
-        "mov r2, #0x8000         \n"
-    "1:                          \n" /* If it intends to sleep, let it first */
-        "ldrb r1, [%[sem], #2]   \n" /* intend_sleep != 0 ? */
-        "cmp r1, #1              \n"
-        "ldr r1, [%[st]]         \n" /* && not sleeping ? */
-        "tsteq r1, r2, lsr %[oc] \n"
-        "beq 1b                  \n" /* Wait for sleep or wake */
-        "tst r1, r2, lsr %[oc]   \n"
-        "ldrne r2, =0xcf004054   \n" /* If sleeping, wake it */
-        "movne r1, #0xce         \n"
-        "strne r1, [r2, %[oc], lsl #2] \n"
-        "mov r1, #0              \n" /* Done with wake procedure */
-        "strb r1, [%[sem], #0]   \n"
-        "msr cpsr_c, r3          \n" /* Restore IRQ */
-        :
-        : [sem]"r"(&core_semaphores[othercore]),
-          [st]"r"(&PROC_STAT),
-          [oc]"r"(othercore)
-        : "r1", "r2", "r3"
-    );
-#else /* C version for reference */
     /* Disable interrupts - avoid reentrancy from the tick */
     int oldlevel = disable_irq_save();
 
     /* Signal intent to wake other processor - set stay awake */
-    core_semaphores[othercore].intend_wake = 1;
-    core_semaphores[othercore].stay_awake = 1;
+    MBX_MSG_SET = 0x11 << othercore;
 
     /* If it intends to sleep, wait until it does or aborts */
-    while (core_semaphores[othercore].intend_sleep != 0 &&
-           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
+           (PROC_CTL(othercore) & PROC_SLEEP) == 0);
 
     /* If sleeping, wake it up */
-    if (PROC_STAT & PROC_SLEEPING(othercore))
-        wake_core(othercore);
+    if (PROC_CTL(othercore) & PROC_SLEEP)
+        PROC_CTL(othercore) = 0;
 
     /* Done with wake procedure */
-    core_semaphores[othercore].intend_wake = 0;
+    MBX_MSG_CLR = 0x1 << othercore;
     restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#endif /* CPU type */
-
-#if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Switches to a stack that always resides in the Rockbox core.
- *
- * Needed when a thread suicides on a core other than the main CPU since the
- * stack used when idling is the stack of the last thread to run. This stack
- * may not reside in the core firmware in which case the core will continue
- * to use a stack from an unloaded module until another thread runs on it.
- *---------------------------------------------------------------------------
- */
-static inline void switch_to_idle_stack(const unsigned int core)
-{
-    asm volatile (
-        "str sp, [%0] \n" /* save original stack pointer on idle stack */
-        "mov sp, %0   \n" /* switch stacks */
-        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
-    (void)core;
-}
-
-/*---------------------------------------------------------------------------
- * Perform core switch steps that need to take place inside switch_thread.
- *
- * These steps must take place before changing the processor and after
- * having entered switch_thread since switch_thread may not do a normal return
- * because the stack being used for anything the compiler saved will not belong
- * to the thread's destination core and it may have been recycled for other
- * purposes by the time a normal context load has taken place. switch_thread
- * will also clobber anything stashed in the thread's context or stored in the
- * nonvolatile registers if it is saved there before the call since the
- * compiler's order of operations cannot be known for certain.
- */
-static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
-{
-    /* Flush our data to ram */
-    cpucache_flush();
-    /* Stash thread in r4 slot */
-    thread->context.r[0] = (uint32_t)thread;
-    /* Stash restart address in r5 slot */
-    thread->context.r[1] = thread->context.start;
-    /* Save sp in context.sp while still running on old core */
-    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
 }
+#endif /* ASM/C selection */
 
-/*---------------------------------------------------------------------------
- * Machine-specific helper function for switching the processor a thread is
- * running on. Basically, the thread suicides on the departing core and is
- * reborn on the destination. Were it not for gcc's ill-behavior regarding
- * naked functions written in C where it actually clobbers non-volatile
- * registers before the intended prologue code, this would all be much
- * simpler. Generic setup is done in switch_core itself.
- */
+#endif /* CPU_PPxxxx */
 
-/*---------------------------------------------------------------------------
- * This actually performs the core switch.
- */
-static void __attribute__((naked))
-switch_thread_core(unsigned int core, struct thread_entry *thread)
+/* Keep constant pool in range of inline ASM */
+static void __attribute__((naked, used)) dump_ltorg(void)
 {
-    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
-     * Stack access also isn't permitted until restoring the original stack and
-     * context. */
-    asm volatile (
-        "stmfd sp!, { r4-r11, lr }  \n" /* Stack all non-volatile context on current core */
-        "ldr   r2, =idle_stacks     \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
-        "ldr   r2, [r2, r0, lsl #2] \n"
-        "add   r2, r2, %0*4         \n"
-        "stmfd r2!, { sp }          \n" /* save original stack pointer on idle stack */
-        "mov   sp, r2               \n" /* switch stacks */
-        "adr   r2, 1f               \n" /* r2 = new core restart address */
-        "str   r2, [r1, #40]        \n" /* thread->context.start = r2 */
-        "ldr   pc, =switch_thread   \n" /* r0 = thread after call - see load_context */
-    "1:                             \n"
-        "ldr   sp, [r0, #32]        \n" /* Reload original sp from context structure */
-        "mov   r1, #0               \n" /* Clear start address */
-        "str   r1, [r0, #40]        \n"
-        "ldr   r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
-        "mov   lr, pc               \n"
-        "bx    r0                   \n"
-        "ldmfd sp!, { r4-r11, pc }  \n" /* Restore non-volatile context to new core and return */
-        ".ltorg                     \n" /* Dump constant pool */
-        : : "i"(IDLE_STACK_WORDS)
-    );
-    (void)core; (void)thread;
+    asm volatile (".ltorg");
 }
 
-/*---------------------------------------------------------------------------
- * Do any device-specific inits for the threads and synchronize the kernel
- * initializations.
- *---------------------------------------------------------------------------
- */
-static void core_thread_init(unsigned int core) INIT_ATTR;
-static void core_thread_init(unsigned int core)
-{
-    if (core == CPU)
-    {
-        /* Wake up coprocessor and let it initialize kernel and threads */
-#ifdef CPU_PP502x
-        MBX_MSG_CLR = 0x3f;
-#endif
-        wake_core(COP);
-        /* Sleep until COP has finished */
-        sleep_core(CPU);
-    }
-    else
-    {
-        /* Wake the CPU and return */
-        wake_core(CPU);
-    }
-}
 #endif /* NUM_CORES */
-
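One last detail: the ".ltorg" directive that used to sit at the end of switch_thread_core's asm block now lives in its own never-called dump_ltorg() function. The point of the idiom: "ldr rX, =symbol" assembles to a PC-relative load from a literal pool that must stay within reach (about 4KB on ARM), so a naked, used function is parked nearby purely to hold the pool. A sketch of the pattern in isolation (illustrative; pool_anchor is a made-up name):

    /* "ldr r0, =some_function" elsewhere in the file needs a literal
     * pool within ~4KB of the load instruction */
    static void __attribute__((naked, used)) pool_anchor(void)
    {
        asm volatile (".ltorg"); /* emit pending literal-pool entries here */
    }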