path: root/firmware/target/arm
author     Michael Sevakis <jethead71@rockbox.org>  2010-06-10 17:31:45 +0000
committer  Michael Sevakis <jethead71@rockbox.org>  2010-06-10 17:31:45 +0000
commit     05ca8978c4fe965a619f016d79aaf6955767abf9 (patch)
tree       606a19c322864fa823fda7c0a6daf998f76417e3 /firmware/target/arm
parent     863891ce9aef50fde13cf3df897aca144a2c570a (diff)
download   rockbox-05ca8978c4fe965a619f016d79aaf6955767abf9.tar.gz
           rockbox-05ca8978c4fe965a619f016d79aaf6955767abf9.zip
Clean unused stuff out of thread.h and config.h and reorganize thread-pp.c to simplify the preprocessor blocks.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26743 a1c6a512-1295-4272-9138-f99709370657
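
Note: the reorganization collapses thread-pp.c into one top-level NUM_CORES split, with the model-specific (PP5002 vs. PP502x) blocks nested inside it. The outline below is reconstructed from the hunks that follow, purely for orientation; the comments stand in for whole sections and are not part of the commit:

    #if NUM_CORES == 1
    /* single-core stubs: core_sleep(), core_wake() */
    #else /* NUM_CORES > 1 */
    /* model-generic dual-core code: idle stacks, Peterson-style corelocks,
     * core_thread_init(), switch_to_idle_stack(), switch_thread_core() */
    #if CONFIG_CPU == PP5002
    /* PP5002 core_sleep()/core_wake(): no mailboxes, emulated with byte flags */
    #elif defined (CPU_PP502x)
    /* PP502x core_sleep()/core_wake(): MBX_* mailbox registers */
    #endif /* CPU_PPxxxx */
    #endif /* NUM_CORES */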
Diffstat (limited to 'firmware/target/arm')
-rw-r--r--  firmware/target/arm/thread-pp.c | 526
1 file changed, 263 insertions(+), 263 deletions(-)
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
index 8dfbd64080..335f1f3e0a 100644
--- a/firmware/target/arm/thread-pp.c
+++ b/firmware/target/arm/thread-pp.c
@@ -26,7 +26,21 @@
 #define IF_NO_SKIP_YIELD(...) __VA_ARGS__
 #endif

-#if NUM_CORES > 1
+#if NUM_CORES == 1
+/* Single-core variants for FORCE_SINGLE_CORE */
+static inline void core_sleep(void)
+{
+    sleep_core(CURRENT_CORE);
+    enable_irq();
+}
+
+/* Shared single-core build debugging version */
+void core_wake(void)
+{
+    /* No wakey - core already wakey (because this is it) */
+}
+#else /* NUM_CORES > 1 */
+/** Model-generic PP dual-core code **/
 extern uintptr_t cpu_idlestackbegin[];
 extern uintptr_t cpu_idlestackend[];
 extern uintptr_t cop_idlestackbegin[];
@@ -37,23 +51,7 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
     [COP] = cop_idlestackbegin
 };

-#if CONFIG_CPU == PP5002
-/* Bytes to emulate the PP502x mailbox bits */
-struct core_semaphores
-{
-    volatile uint8_t intend_wake;  /* 00h */
-    volatile uint8_t stay_awake;   /* 01h */
-    volatile uint8_t intend_sleep; /* 02h */
-    volatile uint8_t unused;       /* 03h */
-};
-
-static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
-#endif /* CONFIG_CPU == PP5002 */
-
-#endif /* NUM_CORES */
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Software core locks using Peterson's mutual exclusion algorithm */
+/* Core locks using Peterson's mutual exclusion algorithm */

 /*---------------------------------------------------------------------------
  * Initialize the corelock structure.
@@ -69,8 +67,7 @@ void corelock_init(struct corelock *cl)
  * Wait for the corelock to become free and acquire it when it does.
  *---------------------------------------------------------------------------
  */
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
+void __attribute__((naked)) corelock_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -96,8 +93,7 @@ void corelock_lock(struct corelock *cl)
  * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
  *---------------------------------------------------------------------------
  */
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
+int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
 {
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
@@ -125,8 +121,7 @@ int corelock_try_lock(struct corelock *cl)
  * Release ownership of the corelock
  *---------------------------------------------------------------------------
  */
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
+void __attribute__((naked)) corelock_unlock(struct corelock *cl)
 {
     asm volatile (
         "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
@@ -138,11 +133,9 @@ void corelock_unlock(struct corelock *cl)
     );
     (void)cl;
 }
+
 #else /* C versions for reference */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and aquire it when it does.
- *---------------------------------------------------------------------------
- */
+
 void corelock_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
@@ -158,10 +151,6 @@ void corelock_lock(struct corelock *cl)
     }
 }

-/*---------------------------------------------------------------------------
- * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
 int corelock_try_lock(struct corelock *cl)
 {
     const unsigned int core = CURRENT_CORE;
@@ -179,85 +168,141 @@ int corelock_try_lock(struct corelock *cl)
     return 0;
 }

-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
 void corelock_unlock(struct corelock *cl)
 {
     cl->myl[CURRENT_CORE] = 0;
 }
 #endif /* ASM / C selection */

-#endif /* CONFIG_CORELOCK == SW_CORELOCK */
-
 /*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated and if
- * no other core requested a wakeup for it to perform a task.
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
  *---------------------------------------------------------------------------
  */
-#ifdef CPU_PP502x
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+static void INIT_ATTR core_thread_init(unsigned int core)
 {
-    sleep_core(CURRENT_CORE);
-    enable_irq();
+    if (core == CPU)
+    {
+        /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+        MBX_MSG_CLR = 0x3f;
+#endif
+        wake_core(COP);
+        /* Sleep until COP has finished */
+        sleep_core(CPU);
+    }
+    else
+    {
+        /* Wake the CPU and return */
+        wake_core(CPU);
+    }
 }
-#else
-static inline void core_sleep(unsigned int core)
+
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU since the
+ * stack used when idling is the stack of the last thread to run. This stack
+ * may not reside in the core firmware in which case the core will continue
+ * to use a stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
 {
-#if 1
     asm volatile (
-        "mov r0, #4 \n" /* r0 = 0x4 << core */
-        "mov r0, r0, lsl %[c] \n"
-        "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
-        "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
-        "tst r1, r0, lsl #2 \n"
-        "moveq r1, #0x80000000 \n" /* Then sleep */
-        "streq r1, [%[ctl], %[c], lsl #2] \n"
-        "moveq r1, #0 \n" /* Clear control reg */
-        "streq r1, [%[ctl], %[c], lsl #2] \n"
-        "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
-        "str r1, [%[mbx], #8] \n"
-        "1: \n" /* Wait for wake procedure to finish */
-        "ldr r1, [%[mbx], #0] \n"
-        "tst r1, r0, lsr #2 \n"
-        "bne 1b \n"
-        :
-        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
-        : "r0", "r1");
-#else /* C version for reference */
-    /* Signal intent to sleep */
-    MBX_MSG_SET = 0x4 << core;
+        "str sp, [%0] \n" /* save original stack pointer on idle stack */
+        "mov sp, %0 \n" /* switch stacks */
+        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
+    (void)core;
+}

-    /* Something waking or other processor intends to wake us? */
-    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
-    {
-        sleep_core(core);
-        wake_core(core);
-    }
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place while before changing the processor and after
+ * having entered switch_thread since switch_thread may not do a normal return
+ * because the stack being used for anything the compiler saved will not belong
+ * to the thread's destination core and it may have been recycled for other
+ * purposes by the time a normal context load has taken place. switch_thread
+ * will also clobber anything stashed in the thread's context or stored in the
+ * nonvolatile registers if it is saved there before the call since the
+ * compiler's order of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+    /* Flush our data to ram */
+    cpucache_flush();
+    /* Stash thread in r4 slot */
+    thread->context.r[0] = (uint32_t)thread;
+    /* Stash restart address in r5 slot */
+    thread->context.r[1] = thread->context.start;
+    /* Save sp in context.sp while still running on old core */
+    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
+}

-    /* Signal wake - clear wake flag */
-    MBX_MSG_CLR = 0x14 << core;
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill-behavior regarding
+ * naked functions written in C where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */

-    /* Wait for other processor to finish wake procedure */
-    while (MBX_MSG_STAT & (0x1 << core));
-#endif /* ASM/C selection */
-    enable_irq();
-}
-#endif /* NUM_CORES */
-#elif CONFIG_CPU == PP5002
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void __attribute__((naked))
+    switch_thread_core(unsigned int core, struct thread_entry *thread)
 {
-    sleep_core(CURRENT_CORE);
-    enable_irq();
+    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
+     * Stack access also isn't permitted until restoring the original stack and
+     * context. */
+    asm volatile (
+        "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
+        "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+        "ldr r2, [r2, r0, lsl #2] \n"
+        "add r2, r2, %0*4 \n"
+        "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
+        "mov sp, r2 \n" /* switch stacks */
+        "adr r2, 1f \n" /* r2 = new core restart address */
+        "str r2, [r1, #40] \n" /* thread->context.start = r2 */
+        "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
+        "1: \n"
+        "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
+        "mov r1, #0 \n" /* Clear start address */
+        "str r1, [r0, #40] \n"
+        "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
+        "mov lr, pc \n"
+        "bx r0 \n"
+        "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
+        : : "i"(IDLE_STACK_WORDS)
+    );
+    (void)core; (void)thread;
 }
-#else
-/* PP5002 has no mailboxes - emulate using bytes */
+
+/** PP-model-specific dual-core code **/
+
+#if CONFIG_CPU == PP5002
+/* PP5002 has no mailboxes - Bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+    volatile uint8_t intend_wake;  /* 00h */
+    volatile uint8_t stay_awake;   /* 01h */
+    volatile uint8_t intend_sleep; /* 02h */
+    volatile uint8_t unused;       /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+
+#if 1 /* Select ASM */
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
 static inline void core_sleep(unsigned int core)
 {
-#if 1
     asm volatile (
         "mov r0, #1 \n" /* Signal intent to sleep */
         "strb r0, [%[sem], #2] \n"
@@ -288,7 +333,50 @@ static inline void core_sleep(unsigned int core)
         [ctl]"r"(&CPU_CTL)
         : "r0"
     );
+    enable_irq();
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(unsigned int othercore)
+{
+    /* avoid r0 since that contains othercore */
+    asm volatile (
+        "mrs r3, cpsr \n" /* Disable IRQ */
+        "orr r1, r3, #0x80 \n"
+        "msr cpsr_c, r1 \n"
+        "mov r1, #1 \n" /* Signal intent to wake other core */
+        "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
+        "strh r1, [%[sem], #0] \n"
+        "mov r2, #0x8000 \n"
+        "1: \n" /* If it intends to sleep, let it first */
+        "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
+        "cmp r1, #1 \n"
+        "ldr r1, [%[st]] \n" /* && not sleeping ? */
+        "tsteq r1, r2, lsr %[oc] \n"
+        "beq 1b \n" /* Wait for sleep or wake */
+        "tst r1, r2, lsr %[oc] \n"
+        "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
+        "movne r1, #0xce \n"
+        "strne r1, [r2, %[oc], lsl #2] \n"
+        "mov r1, #0 \n" /* Done with wake procedure */
+        "strb r1, [%[sem], #0] \n"
+        "msr cpsr_c, r3 \n" /* Restore IRQ */
+        :
+        : [sem]"r"(&core_semaphores[othercore]),
+          [st]"r"(&PROC_STAT),
+          [oc]"r"(othercore)
+        : "r1", "r2", "r3"
+    );
+}
+
 #else /* C version for reference */
+
+static inline void core_sleep(unsigned int core)
+{
     /* Signal intent to sleep */
     core_semaphores[core].intend_sleep = 1;

@@ -306,27 +394,71 @@ static inline void core_sleep(unsigned int core)
     while (core_semaphores[core].intend_wake != 0);

     /* Enable IRQ */
-#endif /* ASM/C selection */
     enable_irq();
 }
-#endif /* NUM_CORES */
-#endif /* PP CPU type */

+void core_wake(unsigned int othercore)
+{
+    /* Disable interrupts - avoid reentrancy from the tick */
+    int oldlevel = disable_irq_save();
+
+    /* Signal intent to wake other processor - set stay awake */
+    core_semaphores[othercore].intend_wake = 1;
+    core_semaphores[othercore].stay_awake = 1;
+
+    /* If it intends to sleep, wait until it does or aborts */
+    while (core_semaphores[othercore].intend_sleep != 0 &&
+           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+
+    /* If sleeping, wake it up */
+    if (PROC_STAT & PROC_SLEEPING(othercore))
+        wake_core(othercore);
+
+    /* Done with wake procedure */
+    core_semaphores[othercore].intend_wake = 0;
+    restore_irq(oldlevel);
+}
+#endif /* ASM/C selection */
+
+#elif defined (CPU_PP502x)
+
+#if 1 /* Select ASM */
 /*---------------------------------------------------------------------------
- * Wake another processor core that is sleeping or prevent it from doing so
- * if it was already destined. FIQ, IRQ should be disabled before calling.
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
  *---------------------------------------------------------------------------
  */
-#if NUM_CORES == 1
-/* Shared single-core build debugging version */
-void core_wake(void)
+static inline void core_sleep(unsigned int core)
 {
-    /* No wakey - core already wakey */
+    asm volatile (
+        "mov r0, #4 \n" /* r0 = 0x4 << core */
+        "mov r0, r0, lsl %[c] \n"
+        "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
+        "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+        "tst r1, r0, lsl #2 \n"
+        "moveq r1, #0x80000000 \n" /* Then sleep */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "moveq r1, #0 \n" /* Clear control reg */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
+        "str r1, [%[mbx], #8] \n"
+        "1: \n" /* Wait for wake procedure to finish */
+        "ldr r1, [%[mbx], #0] \n"
+        "tst r1, r0, lsr #2 \n"
+        "bne 1b \n"
+        :
+        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
+        : "r0", "r1");
+    enable_irq();
 }
-#elif defined (CPU_PP502x)
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
 void core_wake(unsigned int othercore)
 {
-#if 1
     /* avoid r0 since that contains othercore */
     asm volatile (
         "mrs r3, cpsr \n" /* Disable IRQ */
@@ -352,190 +484,58 @@ void core_wake(unsigned int othercore)
         : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
           [oc]"r"(othercore)
         : "r1", "r2", "r3");
+}
+
 #else /* C version for reference */
-    /* Disable interrupts - avoid reentrancy from the tick */
-    int oldlevel = disable_irq_save();

-    /* Signal intent to wake other processor - set stay awake */
-    MBX_MSG_SET = 0x11 << othercore;
+static inline void core_sleep(unsigned int core)
+{
+    /* Signal intent to sleep */
+    MBX_MSG_SET = 0x4 << core;

-    /* If it intends to sleep, wait until it does or aborts */
-    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
-           (PROC_CTL(othercore) & PROC_SLEEP) == 0);
+    /* Something waking or other processor intends to wake us? */
+    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
+    {
+        sleep_core(core);
+        wake_core(core);
+    }

-    /* If sleeping, wake it up */
-    if (PROC_CTL(othercore) & PROC_SLEEP)
-        PROC_CTL(othercore) = 0;
+    /* Signal wake - clear wake flag */
+    MBX_MSG_CLR = 0x14 << core;

-    /* Done with wake procedure */
-    MBX_MSG_CLR = 0x1 << othercore;
-    restore_irq(oldlevel);
-#endif /* ASM/C selection */
+    /* Wait for other processor to finish wake procedure */
+    while (MBX_MSG_STAT & (0x1 << core));
+    enable_irq();
 }
-#elif CONFIG_CPU == PP5002
-/* PP5002 has no mailboxes - emulate using bytes */
+
 void core_wake(unsigned int othercore)
 {
-#if 1
-    /* avoid r0 since that contains othercore */
-    asm volatile (
-        "mrs r3, cpsr \n" /* Disable IRQ */
-        "orr r1, r3, #0x80 \n"
-        "msr cpsr_c, r1 \n"
-        "mov r1, #1 \n" /* Signal intent to wake other core */
-        "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
-        "strh r1, [%[sem], #0] \n"
-        "mov r2, #0x8000 \n"
-        "1: \n" /* If it intends to sleep, let it first */
-        "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
-        "cmp r1, #1 \n"
-        "ldr r1, [%[st]] \n" /* && not sleeping ? */
-        "tsteq r1, r2, lsr %[oc] \n"
-        "beq 1b \n" /* Wait for sleep or wake */
-        "tst r1, r2, lsr %[oc] \n"
-        "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
-        "movne r1, #0xce \n"
-        "strne r1, [r2, %[oc], lsl #2] \n"
-        "mov r1, #0 \n" /* Done with wake procedure */
-        "strb r1, [%[sem], #0] \n"
-        "msr cpsr_c, r3 \n" /* Restore IRQ */
-        :
-        : [sem]"r"(&core_semaphores[othercore]),
-          [st]"r"(&PROC_STAT),
-          [oc]"r"(othercore)
-        : "r1", "r2", "r3"
-    );
-#else /* C version for reference */
     /* Disable interrupts - avoid reentrancy from the tick */
     int oldlevel = disable_irq_save();

     /* Signal intent to wake other processor - set stay awake */
-    core_semaphores[othercore].intend_wake = 1;
-    core_semaphores[othercore].stay_awake = 1;
+    MBX_MSG_SET = 0x11 << othercore;

     /* If it intends to sleep, wait until it does or aborts */
-    while (core_semaphores[othercore].intend_sleep != 0 &&
-           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
+           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

     /* If sleeping, wake it up */
-    if (PROC_STAT & PROC_SLEEPING(othercore))
-        wake_core(othercore);
+    if (PROC_CTL(othercore) & PROC_SLEEP)
+        PROC_CTL(othercore) = 0;

     /* Done with wake procedure */
-    core_semaphores[othercore].intend_wake = 0;
+    MBX_MSG_CLR = 0x1 << othercore;
     restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#endif /* CPU type */
-
-#if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Switches to a stack that always resides in the Rockbox core.
- *
- * Needed when a thread suicides on a core other than the main CPU since the
- * stack used when idling is the stack of the last thread to run. This stack
- * may not reside in the core firmware in which case the core will continue
- * to use a stack from an unloaded module until another thread runs on it.
- *---------------------------------------------------------------------------
- */
-static inline void switch_to_idle_stack(const unsigned int core)
-{
-    asm volatile (
-        "str sp, [%0] \n" /* save original stack pointer on idle stack */
-        "mov sp, %0 \n" /* switch stacks */
-        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
-    (void)core;
-}
-
-/*---------------------------------------------------------------------------
- * Perform core switch steps that need to take place inside switch_thread.
- *
- * These steps must take place while before changing the processor and after
- * having entered switch_thread since switch_thread may not do a normal return
- * because the stack being used for anything the compiler saved will not belong
- * to the thread's destination core and it may have been recycled for other
- * purposes by the time a normal context load has taken place. switch_thread
- * will also clobber anything stashed in the thread's context or stored in the
- * nonvolatile registers if it is saved there before the call since the
- * compiler's order of operations cannot be known for certain.
- */
-static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
-{
-    /* Flush our data to ram */
-    cpucache_flush();
-    /* Stash thread in r4 slot */
-    thread->context.r[0] = (uint32_t)thread;
-    /* Stash restart address in r5 slot */
-    thread->context.r[1] = thread->context.start;
-    /* Save sp in context.sp while still running on old core */
-    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
 }
+#endif /* ASM/C selection */

-/*---------------------------------------------------------------------------
- * Machine-specific helper function for switching the processor a thread is
- * running on. Basically, the thread suicides on the departing core and is
- * reborn on the destination. Were it not for gcc's ill-behavior regarding
- * naked functions written in C where it actually clobbers non-volatile
- * registers before the intended prologue code, this would all be much
- * simpler. Generic setup is done in switch_core itself.
- */
-
-/*---------------------------------------------------------------------------
- * This actually performs the core switch.
- */
-static void __attribute__((naked))
-    switch_thread_core(unsigned int core, struct thread_entry *thread)
+#endif /* CPU_PPxxxx */
+
+/* Keep constant pool in range of inline ASM */
+static void __attribute__((naked, used)) dump_ltorg(void)
 {
-    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
-     * Stack access also isn't permitted until restoring the original stack and
-     * context. */
-    asm volatile (
-        "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
-        "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
-        "ldr r2, [r2, r0, lsl #2] \n"
-        "add r2, r2, %0*4 \n"
-        "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
-        "mov sp, r2 \n" /* switch stacks */
-        "adr r2, 1f \n" /* r2 = new core restart address */
-        "str r2, [r1, #40] \n" /* thread->context.start = r2 */
-        "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
-        "1: \n"
-        "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
-        "mov r1, #0 \n" /* Clear start address */
-        "str r1, [r0, #40] \n"
-        "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
-        "mov lr, pc \n"
-        "bx r0 \n"
-        "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
-        ".ltorg \n" /* Dump constant pool */
-        : : "i"(IDLE_STACK_WORDS)
-    );
-    (void)core; (void)thread;
+    asm volatile (".ltorg");
 }

-/*---------------------------------------------------------------------------
- * Do any device-specific inits for the threads and synchronize the kernel
- * initializations.
- *---------------------------------------------------------------------------
- */
-static void core_thread_init(unsigned int core) INIT_ATTR;
-static void core_thread_init(unsigned int core)
-{
-    if (core == CPU)
-    {
-        /* Wake up coprocessor and let it initialize kernel and threads */
-#ifdef CPU_PP502x
-        MBX_MSG_CLR = 0x3f;
-#endif
-        wake_core(COP);
-        /* Sleep until COP has finished */
-        sleep_core(CPU);
-    }
-    else
-    {
-        /* Wake the CPU and return */
-        wake_core(CPU);
-    }
-}
 #endif /* NUM_CORES */
-
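
Note: the corelock routines in this file are described in the source as "Core locks using Peterson's mutual exclusion algorithm". For readers unfamiliar with it, a generic two-party Peterson lock looks roughly like the sketch below. This is a textbook illustration only, not the Rockbox implementation (which uses the complementary core-ID bitmasks and the assembly shown above); flag, victim and the peterson_* names are invented here for the example.

    /* Textbook two-party Peterson lock, ids 0 and 1 (illustrative sketch;
     * a real implementation also has to care about memory ordering). */
    static volatile int flag[2];   /* flag[i] != 0: side i wants the lock */
    static volatile int victim;    /* side that must yield on contention  */

    static void peterson_lock(int id)
    {
        flag[id] = 1;              /* announce interest           */
        victim = id;               /* let the other side go first */
        while (flag[1 - id] && victim == id)
            ;                      /* spin while the other side is
                                      interested and we are the victim */
    }

    static void peterson_unlock(int id)
    {
        flag[id] = 0;              /* drop interest, releasing the lock */
    }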