Diffstat (limited to 'firmware/target/arm')
-rw-r--r--  firmware/target/arm/thread-arm.c  | 112
-rw-r--r--  firmware/target/arm/thread-pp.c   | 540
2 files changed, 652 insertions(+), 0 deletions(-)
diff --git a/firmware/target/arm/thread-arm.c b/firmware/target/arm/thread-arm.c
new file mode 100644
index 0000000000..c2d91cec25
--- /dev/null
+++ b/firmware/target/arm/thread-arm.c
@@ -0,0 +1,112 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2005 by Thom Johansen
11 *
12 * Generic ARM threading support
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24/*---------------------------------------------------------------------------
25 * Start the thread running and terminate it if it returns
26 *---------------------------------------------------------------------------
27 */
28static void __attribute__((naked,used)) start_thread(void)
29{
30 /* r0 = context */
31 asm volatile (
32 "ldr sp, [r0, #32] \n" /* Load initial sp */
33 "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
34 "mov r1, #0 \n" /* Mark thread as running */
35 "str r1, [r0, #40] \n"
36#if NUM_CORES > 1
37 "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
38 "mov lr, pc \n" /* This could be the first entry into */
39 "bx r0 \n" /* plugin or codec code for this core. */
40#endif
41 "mov lr, pc \n" /* Call thread function */
42 "bx r4 \n"
43 ); /* No clobber list - new thread doesn't care */
44 thread_exit();
45#if 0
46 asm volatile (".ltorg"); /* Dump constant pool */
47#endif
48}
49
50/* For startup, place context pointer in r4 slot, start_thread pointer in r5
51 * slot, and thread function pointer in context.start. See load_context for
52 * what happens when thread is initially going to run. */
53#define THREAD_STARTUP_INIT(core, thread, function) \
54 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
55 (thread)->context.r[1] = (uint32_t)start_thread, \
56 (thread)->context.start = (uint32_t)function; })
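The hard-coded offsets in the assembly above and in load_context below (#32 for sp, #40 for start) assume an ARM register-file layout roughly like the sketch that follows; the authoritative struct regs lives in the thread header, not in this file, so treat this only as a reading aid:

    struct regs
    {
        uint32_t r[8];   /*  0-28: r4-r11; r[0]/r[1] double as the context pointer
                          *        and start_thread address before the first run */
        uint32_t sp;     /* 32: r13 - stack pointer */
        uint32_t lr;     /* 36: r14 - link register */
        uint32_t start;  /* 40: thread entry point, zeroed once the thread is running */
    };

With that layout, the ldmneia in load_context picks up exactly the two words THREAD_STARTUP_INIT filled in: r0 gets the context pointer and pc gets start_thread.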
57
58
59/*---------------------------------------------------------------------------
60 * Store non-volatile context.
61 *---------------------------------------------------------------------------
62 */
63static inline void store_context(void* addr)
64{
65 asm volatile(
66 "stmia %0, { r4-r11, sp, lr } \n"
67 : : "r" (addr)
68 );
69}
70
71/*---------------------------------------------------------------------------
72 * Load non-volatile context.
73 *---------------------------------------------------------------------------
74 */
75static inline void load_context(const void* addr)
76{
77 asm volatile(
78 "ldr r0, [%0, #40] \n" /* Load start pointer */
79 "cmp r0, #0 \n" /* Check for NULL */
80 "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
81 "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
82 : : "r" (addr) : "r0" /* only! */
83 );
84}
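For orientation, the scheduler is expected to use these two helpers back to back; a minimal sketch of the pairing (old and new are illustrative names, this is not the actual switch_thread body):

    store_context(&old->context);   /* save r4-r11, sp, lr of the outgoing thread */
    /* ... pick the next thread to run ... */
    load_context(&new->context);    /* resume it, or enter start_thread on its first run */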
85
86#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
87|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
88|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
89/* Use the generic ARMv4/v5/v6 wait for IRQ */
90static inline void core_sleep(void)
91{
92 asm volatile (
93 "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
94#if CONFIG_CPU == IMX31L
95 "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
96#endif
97 : : "r"(0)
98 );
99 enable_irq();
100}
101#else
102/* Skip this if special code is required and implemented */
103#ifndef CPU_PP
104static inline void core_sleep(void)
105{
106 #warning core_sleep not implemented, battery life will be decreased
107 enable_irq();
108}
109#endif /* CPU_PP */
110#endif
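The MCR in the generic core_sleep above is the pre-ARMv7 CP15 "wait for interrupt" operation; on ARMv7 and later cores the same thing is spelled as a dedicated instruction. Shown only for comparison, it is not used by this file:

    asm volatile ("wfi");   /* ARMv7+ wait-for-interrupt */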
111
112
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
new file mode 100644
index 0000000000..20105ccb59
--- /dev/null
+++ b/firmware/target/arm/thread-pp.c
@@ -0,0 +1,540 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2007 by Daniel Ankers
11 *
12 * PP5002 and PP502x SoC threading support
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
25/* Support a special workaround object for large-sector disks */
26#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
27#endif
28
29#if NUM_CORES > 1
30extern uintptr_t cpu_idlestackbegin[];
31extern uintptr_t cpu_idlestackend[];
32extern uintptr_t cop_idlestackbegin[];
33extern uintptr_t cop_idlestackend[];
34static uintptr_t * const idle_stacks[NUM_CORES] =
35{
36 [CPU] = cpu_idlestackbegin,
37 [COP] = cop_idlestackbegin
38};
39
40#if CONFIG_CPU == PP5002
41/* Bytes to emulate the PP502x mailbox bits */
42struct core_semaphores
43{
44 volatile uint8_t intend_wake; /* 00h */
45 volatile uint8_t stay_awake; /* 01h */
46 volatile uint8_t intend_sleep; /* 02h */
47 volatile uint8_t unused; /* 03h */
48};
49
50static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
51#endif /* CONFIG_CPU == PP5002 */
52
53#endif /* NUM_CORES */
54
55#if CONFIG_CORELOCK == SW_CORELOCK
56/* Software core locks using Peterson's mutual exclusion algorithm */
57
58/*---------------------------------------------------------------------------
59 * Initialize the corelock structure.
60 *---------------------------------------------------------------------------
61 */
62void corelock_init(struct corelock *cl)
63{
64 memset(cl, 0, sizeof (*cl));
65}
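The assembly versions below address the lock bytes directly (this core's flag at offset id >> 7, the turn byte at offset 2), which assumes a corelock laid out roughly as follows; this is a sketch for reading the asm, the real struct corelock is declared in the kernel headers:

    struct corelock
    {
        volatile unsigned char myl[NUM_CORES];  /* 0-1: per-core "I want the lock" flags */
        volatile unsigned char turn;            /* 2:   tie-break; the last core to write it gives way */
    };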
66
67#if 1 /* Assembly locks to minimize overhead */
68/*---------------------------------------------------------------------------
69 * Wait for the corelock to become free and acquire it when it does.
70 *---------------------------------------------------------------------------
71 */
72void corelock_lock(struct corelock *cl) __attribute__((naked));
73void corelock_lock(struct corelock *cl)
74{
75 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
76 asm volatile (
77 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
78 "ldrb r1, [r1] \n"
79 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
80 "eor r2, r1, #0xff \n" /* r2 = othercore */
81 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
82 "1: \n"
83 "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
84 "cmp r3, #0 \n" /* yes? lock acquired */
85 "bxeq lr \n"
86 "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
87 "cmp r3, r1 \n"
88 "bxeq lr \n" /* yes? lock acquired */
89 "b 1b \n" /* keep trying */
90 : : "i"(&PROCESSOR_ID)
91 );
92 (void)cl;
93}
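The ID trick is easiest to see with the concrete values the comment refers to, 0x55 (CPU) and 0xaa (COP); a shift and an XOR are all that is needed, worked out here as plain arithmetic:

    /* slot in cl->myl[]:     0x55 >> 7 == 0       0xaa >> 7 == 1       */
    /* ID of the other core:  0x55 ^ 0xff == 0xaa  0xaa ^ 0xff == 0x55  */
    /* so "strb r1, [r0, r1, lsr #7]" stores this core's ID into its own slot,
       and a non-zero slot also records which core claimed it. */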
94
95/*---------------------------------------------------------------------------
96 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
97 *---------------------------------------------------------------------------
98 */
99int corelock_try_lock(struct corelock *cl) __attribute__((naked));
100int corelock_try_lock(struct corelock *cl)
101{
102 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
103 asm volatile (
104 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
105 "ldrb r1, [r1] \n"
106 "mov r3, r0 \n"
107 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
108 "eor r2, r1, #0xff \n" /* r2 = othercore */
109 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
110 "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
111 "eors r0, r0, r2 \n" /* yes? lock acquired */
112 "bxne lr \n"
113 "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
114 "ands r0, r0, r1 \n"
115 "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
116 "bx lr \n" /* return result */
117 : : "i"(&PROCESSOR_ID)
118 );
119
120 return 0;
121 (void)cl;
122}
123
124/*---------------------------------------------------------------------------
125 * Release ownership of the corelock
126 *---------------------------------------------------------------------------
127 */
128void corelock_unlock(struct corelock *cl) __attribute__((naked));
129void corelock_unlock(struct corelock *cl)
130{
131 asm volatile (
132 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
133 "ldrb r1, [r1] \n"
134 "mov r2, #0 \n" /* cl->myl[core] = 0 */
135 "strb r2, [r0, r1, lsr #7] \n"
136 "bx lr \n"
137 : : "i"(&PROCESSOR_ID)
138 );
139 (void)cl;
140}
141#else /* C versions for reference */
142/*---------------------------------------------------------------------------
143 * Wait for the corelock to become free and acquire it when it does.
144 *---------------------------------------------------------------------------
145 */
146void corelock_lock(struct corelock *cl)
147{
148 const unsigned int core = CURRENT_CORE;
149 const unsigned int othercore = 1 - core;
150
151 cl->myl[core] = core;
152 cl->turn = othercore;
153
154 for (;;)
155 {
156 if (cl->myl[othercore] == 0 || cl->turn == core)
157 break;
158 }
159}
160
161/*---------------------------------------------------------------------------
162 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
163 *---------------------------------------------------------------------------
164 */
165int corelock_try_lock(struct corelock *cl)
166{
167 const unsigned int core = CURRENT_CORE;
168 const unsigned int othercore = 1 - core;
169
170 cl->myl[core] = core;
171 cl->turn = othercore;
172
173 if (cl->myl[othercore] == 0 || cl->turn == core)
174 {
175 return 1;
176 }
177
178 cl->myl[core] = 0;
179 return 0;
180}
181
182/*---------------------------------------------------------------------------
183 * Release ownership of the corelock
184 *---------------------------------------------------------------------------
185 */
186void corelock_unlock(struct corelock *cl)
187{
188 cl->myl[CURRENT_CORE] = 0;
189}
190#endif /* ASM / C selection */
191
192#endif /* CONFIG_CORELOCK == SW_CORELOCK */
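A minimal usage sketch (my_corelock and shared_update are illustrative names, not from this file; corelock_init must run once before either core takes the lock):

    static struct corelock my_corelock IBSS_ATTR;

    static void shared_update(void)
    {
        corelock_lock(&my_corelock);      /* spins until this core owns the lock */
        /* ... touch state shared between CPU and COP ... */
        corelock_unlock(&my_corelock);
    }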
193
194/*---------------------------------------------------------------------------
195 * Put core in a power-saving state if waking list wasn't repopulated and if
196 * no other core requested a wakeup for it to perform a task.
197 *---------------------------------------------------------------------------
198 */
199#ifdef CPU_PP502x
200#if NUM_CORES == 1
201static inline void core_sleep(void)
202{
203 sleep_core(CURRENT_CORE);
204 enable_irq();
205}
206#else
207static inline void core_sleep(unsigned int core)
208{
209#if 1
210 asm volatile (
211 "mov r0, #4 \n" /* r0 = 0x4 << core */
212 "mov r0, r0, lsl %[c] \n"
213 "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
214 "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
215 "tst r1, r0, lsl #2 \n"
216 "moveq r1, #0x80000000 \n" /* Then sleep */
217 "streq r1, [%[ctl], %[c], lsl #2] \n"
218 "moveq r1, #0 \n" /* Clear control reg */
219 "streq r1, [%[ctl], %[c], lsl #2] \n"
220 "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
221 "str r1, [%[mbx], #8] \n"
222 "1: \n" /* Wait for wake procedure to finish */
223 "ldr r1, [%[mbx], #0] \n"
224 "tst r1, r0, lsr #2 \n"
225 "bne 1b \n"
226 :
227 : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
228 : "r0", "r1");
229#else /* C version for reference */
230 /* Signal intent to sleep */
231 MBX_MSG_SET = 0x4 << core;
232
233 /* Something waking or other processor intends to wake us? */
234 if ((MBX_MSG_STAT & (0x10 << core)) == 0)
235 {
236 sleep_core(core);
237 wake_core(core);
238 }
239
240 /* Signal wake - clear wake flag */
241 MBX_MSG_CLR = 0x14 << core;
242
243 /* Wait for other processor to finish wake procedure */
244 while (MBX_MSG_STAT & (0x1 << core));
245#endif /* ASM/C selection */
246 enable_irq();
247}
248#endif /* NUM_CORES */
249#elif CONFIG_CPU == PP5002
250#if NUM_CORES == 1
251static inline void core_sleep(void)
252{
253 sleep_core(CURRENT_CORE);
254 enable_irq();
255}
#else
256/* PP5002 has no mailboxes - emulate using bytes */
257static inline void core_sleep(unsigned int core)
258{
259#if 1
260 asm volatile (
261 "mov r0, #1 \n" /* Signal intent to sleep */
262 "strb r0, [%[sem], #2] \n"
263 "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
264 "cmp r0, #0 \n"
265 "bne 2f \n"
266 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
267 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
268 * that the correct alternative is executed. Don't change the order
269 * of the next 4 instructions! */
270 "tst pc, #0x0c \n"
271 "mov r0, #0xca \n"
272 "strne r0, [%[ctl], %[c], lsl #2] \n"
273 "streq r0, [%[ctl], %[c], lsl #2] \n"
274 "nop \n" /* nop's needed because of pipeline */
275 "nop \n"
276 "nop \n"
277 "2: \n"
278 "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
279 "strb r0, [%[sem], #1] \n"
280 "strb r0, [%[sem], #2] \n"
281 "1: \n" /* Wait for wake procedure to finish */
282 "ldrb r0, [%[sem], #0] \n"
283 "cmp r0, #0 \n"
284 "bne 1b \n"
285 :
286 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
287 [ctl]"r"(&CPU_CTL)
288 : "r0"
289 );
290#else /* C version for reference */
291 /* Signal intent to sleep */
292 core_semaphores[core].intend_sleep = 1;
293
294 /* Something waking or other processor intends to wake us? */
295 if (core_semaphores[core].stay_awake == 0)
296 {
297 sleep_core(core);
298 }
299
300 /* Signal wake - clear wake flag */
301 core_semaphores[core].stay_awake = 0;
302 core_semaphores[core].intend_sleep = 0;
303
304 /* Wait for other processor to finish wake procedure */
305 while (core_semaphores[core].intend_wake != 0);
306
307 /* Enable IRQ */
308#endif /* ASM/C selection */
309 enable_irq();
310}
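The alignment dance above can be checked by hand: ARM reads pc as the address of the current instruction plus 8, and the two conditional stores sit 8 and 12 bytes after the tst, so the store that actually puts the core to sleep never sits at an address ending in 0. A sketch of the four possible placements, with A = address of the tst (all arithmetic mod 16):

    /* A ends in 0: pc = A+8 ends in 8 -> pc & 0x0c != 0 -> strne at A+8  (ends in 8) does the store */
    /* A ends in 4: pc = A+8 ends in C -> pc & 0x0c != 0 -> strne at A+8  (ends in C) does the store */
    /* A ends in 8: pc = A+8 ends in 0 -> pc & 0x0c == 0 -> streq at A+12 (ends in 4) does the store */
    /* A ends in C: pc = A+8 ends in 4 -> pc & 0x0c != 0 -> strne at A+8  (ends in 4) does the store */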
311#endif /* NUM_CORES */
312#endif /* PP CPU type */
313
314/*---------------------------------------------------------------------------
315 * Wake another processor core that is sleeping, or prevent it from sleeping
316 * if it is about to. FIQ, IRQ should be disabled before calling.
317 *---------------------------------------------------------------------------
318 */
319#if NUM_CORES == 1
320/* Shared single-core build debugging version */
321void core_wake(void)
322{
323 /* No wakey - core already wakey */
324}
325#elif defined (CPU_PP502x)
326void core_wake(unsigned int othercore)
327{
328#if 1
329 /* avoid r0 since that contains othercore */
330 asm volatile (
331 "mrs r3, cpsr \n" /* Disable IRQ */
332 "orr r1, r3, #0x80 \n"
333 "msr cpsr_c, r1 \n"
334 "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
335 "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
336 "str r2, [%[mbx], #4] \n"
337 "1: \n" /* If it intends to sleep, let it first */
338 "ldr r1, [%[mbx], #0] \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
339 "eor r1, r1, #0xc \n"
340 "tst r1, r2, lsr #2 \n"
341 "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
342 "tsteq r1, #0x80000000 \n"
343 "beq 1b \n" /* Wait for sleep or wake */
344 "tst r1, #0x80000000 \n" /* If sleeping, wake it */
345 "movne r1, #0x0 \n"
346 "strne r1, [%[ctl], %[oc], lsl #2] \n"
347 "mov r1, r2, lsr #4 \n"
348 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
349 "msr cpsr_c, r3 \n" /* Restore IRQ */
350 :
351 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
352 [oc]"r"(othercore)
353 : "r1", "r2", "r3");
354#else /* C version for reference */
355 /* Disable interrupts - avoid reentrancy from the tick */
356 int oldlevel = disable_irq_save();
357
358 /* Signal intent to wake other processor - set stay awake */
359 MBX_MSG_SET = 0x11 << othercore;
360
361 /* If it intends to sleep, wait until it does or aborts */
362 while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
363 (PROC_CTL(othercore) & PROC_SLEEP) == 0);
364
365 /* If sleeping, wake it up */
366 if (PROC_CTL(othercore) & PROC_SLEEP)
367 PROC_CTL(othercore) = 0;
368
369 /* Done with wake procedure */
370 MBX_MSG_CLR = 0x1 << othercore;
371 restore_irq(oldlevel);
372#endif /* ASM/C selection */
373}
374#elif CONFIG_CPU == PP5002
375/* PP5002 has no mailboxes - emulate using bytes */
376void core_wake(unsigned int othercore)
377{
378#if 1
379 /* avoid r0 since that contains othercore */
380 asm volatile (
381 "mrs r3, cpsr \n" /* Disable IRQ */
382 "orr r1, r3, #0x80 \n"
383 "msr cpsr_c, r1 \n"
384 "mov r1, #1 \n" /* Signal intent to wake other core */
385 "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
386 "strh r1, [%[sem], #0] \n"
387 "mov r2, #0x8000 \n"
388 "1: \n" /* If it intends to sleep, let it first */
389 "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
390 "cmp r1, #1 \n"
391 "ldr r1, [%[st]] \n" /* && not sleeping ? */
392 "tsteq r1, r2, lsr %[oc] \n"
393 "beq 1b \n" /* Wait for sleep or wake */
394 "tst r1, r2, lsr %[oc] \n"
395 "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
396 "movne r1, #0xce \n"
397 "strne r1, [r2, %[oc], lsl #2] \n"
398 "mov r1, #0 \n" /* Done with wake procedure */
399 "strb r1, [%[sem], #0] \n"
400 "msr cpsr_c, r3 \n" /* Restore IRQ */
401 :
402 : [sem]"r"(&core_semaphores[othercore]),
403 [st]"r"(&PROC_STAT),
404 [oc]"r"(othercore)
405 : "r1", "r2", "r3"
406 );
407#else /* C version for reference */
408 /* Disable interrupts - avoid reentrancy from the tick */
409 int oldlevel = disable_irq_save();
410
411 /* Signal intent to wake other processor - set stay awake */
412 core_semaphores[othercore].intend_wake = 1;
413 core_semaphores[othercore].stay_awake = 1;
414
415 /* If it intends to sleep, wait until it does or aborts */
416 while (core_semaphores[othercore].intend_sleep != 0 &&
417 (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
418
419 /* If sleeping, wake it up */
420 if (PROC_STAT & PROC_SLEEPING(othercore))
421 wake_core(othercore);
422
423 /* Done with wake procedure */
424 core_semaphores[othercore].intend_wake = 0;
425 restore_irq(oldlevel);
426#endif /* ASM/C selection */
427}
428#endif /* CPU type */
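Reading core_sleep and core_wake together, the PP502x mailbox bits fall into three groups per core. Naming them makes the handshake easier to follow; these macros are only an annotation inferred from the code above, not definitions from the Rockbox headers:

    #define MBX_WAKE_IN_PROGRESS(core) (0x01 << (core)) /* other core is running its wake procedure */
    #define MBX_INTEND_SLEEP(core)     (0x04 << (core)) /* this core has announced it is about to sleep */
    #define MBX_STAY_AWAKE(core)       (0x10 << (core)) /* other core asks this core not to sleep */

The PP5002 core_semaphores bytes (intend_wake, stay_awake, intend_sleep) emulate exactly this protocol.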
429
430#if NUM_CORES > 1
431/*---------------------------------------------------------------------------
432 * Switches to a stack that always resides in the Rockbox core.
433 *
434 * Needed when a thread suicides on a core other than the main CPU since the
435 * stack used when idling is the stack of the last thread to run. This stack
436 * may not reside in the core firmware in which case the core will continue
437 * to use a stack from an unloaded module until another thread runs on it.
438 *---------------------------------------------------------------------------
439 */
440static inline void switch_to_idle_stack(const unsigned int core)
441{
442 asm volatile (
443 "str sp, [%0] \n" /* save original stack pointer on idle stack */
444 "mov sp, %0 \n" /* switch stacks */
445 : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
446 (void)core;
447}
448
449/*---------------------------------------------------------------------------
450 * Perform core switch steps that need to take place inside switch_thread.
451 *
452 * These steps must take place after entering switch_thread and before the
453 * switch to the other processor, because switch_thread may not do a normal
454 * return: anything the compiler saved on the stack will not belong to the
455 * thread's destination core and may have been recycled for other purposes by
456 * the time a normal context load has taken place. switch_thread will also
457 * clobber anything stashed in the thread's context or stored in the
458 * non-volatile registers if it is saved there before the call, since the
459 * compiler's order of operations cannot be known for certain.
460 */
461static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
462{
463 /* Flush our data to ram */
464 cpucache_flush();
465 /* Stash thread in r4 slot */
466 thread->context.r[0] = (uint32_t)thread;
467 /* Stash restart address in r5 slot */
468 thread->context.r[1] = thread->context.start;
469 /* Save sp in context.sp while still running on old core */
470 thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
471}
472
473/*---------------------------------------------------------------------------
474 * Machine-specific helper function for switching the processor a thread is
475 * running on. Basically, the thread suicides on the departing core and is
476 * reborn on the destination. Were it not for gcc's ill-behavior regarding
477 * naked functions written in C where it actually clobbers non-volatile
478 * registers before the intended prologue code, this would all be much
479 * simpler. Generic setup is done in switch_core itself.
480 */
481
482/*---------------------------------------------------------------------------
483 * This actually performs the core switch.
484 */
485static void __attribute__((naked))
486 switch_thread_core(unsigned int core, struct thread_entry *thread)
487{
488 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
489 * Stack access also isn't permitted until restoring the original stack and
490 * context. */
491 asm volatile (
492 "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
493 "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
494 "ldr r2, [r2, r0, lsl #2] \n"
495 "add r2, r2, %0*4 \n"
496 "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
497 "mov sp, r2 \n" /* switch stacks */
498 "adr r2, 1f \n" /* r2 = new core restart address */
499 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
500 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
501 "1: \n"
502 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
503 "mov r1, #0 \n" /* Clear start address */
504 "str r1, [r0, #40] \n"
505 "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
506 "mov lr, pc \n"
507 "bx r0 \n"
508 "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
509 ".ltorg \n" /* Dump constant pool */
510 : : "i"(IDLE_STACK_WORDS)
511 );
512 (void)core; (void)thread;
513}
514
515/*---------------------------------------------------------------------------
516 * Do any device-specific inits for the threads and synchronize the kernel
517 * initializations.
518 *---------------------------------------------------------------------------
519 */
520static void core_thread_init(unsigned int core) INIT_ATTR;
521static void core_thread_init(unsigned int core)
522{
523 if (core == CPU)
524 {
525 /* Wake up coprocessor and let it initialize kernel and threads */
526#ifdef CPU_PP502x
527 MBX_MSG_CLR = 0x3f;
528#endif
529 wake_core(COP);
530 /* Sleep until COP has finished */
531 sleep_core(CPU);
532 }
533 else
534 {
535 /* Wake the CPU and return */
536 wake_core(CPU);
537 }
538}
539#endif /* NUM_CORES */
540