Diffstat (limited to 'firmware/target')
-rw-r--r--  firmware/target/arm/thread-arm.c            | 112
-rw-r--r--  firmware/target/arm/thread-pp.c             | 540
-rw-r--r--  firmware/target/coldfire/thread-coldfire.c  |  97
-rw-r--r--  firmware/target/mips/thread-mips32.c        | 133
-rw-r--r--  firmware/target/sh/thread-sh.c              | 109
5 files changed, 991 insertions(+), 0 deletions(-)
diff --git a/firmware/target/arm/thread-arm.c b/firmware/target/arm/thread-arm.c
new file mode 100644
index 0000000000..c2d91cec25
--- /dev/null
+++ b/firmware/target/arm/thread-arm.c
@@ -0,0 +1,112 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2005 by Thom Johansen
 *
 * Generic ARM threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr    sp, [r0, #32]             \n" /* Load initial sp */
        "ldr    r4, [r0, #40]             \n" /* start in r4 since it's non-volatile */
        "mov    r1, #0                    \n" /* Mark thread as running */
        "str    r1, [r0, #40]             \n"
#if NUM_CORES > 1
        "ldr    r0, =cpucache_invalidate  \n" /* Invalidate this core's cache. */
        "mov    lr, pc                    \n" /* This could be the first entry into */
        "bx     r0                        \n" /* plugin or codec code for this core. */
#endif
        "mov    lr, pc                    \n" /* Call thread function */
        "bx     r4                        \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
#if 0
    asm volatile (".ltorg"); /* Dump constant pool */
#endif
}

/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)function; })
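
/* The hard-coded offsets used above and in load_context (#32 for sp, #40 for
 * start) assume a context layout along the following lines. Illustrative
 * sketch only: the real structure is defined elsewhere in the thread headers,
 * so the field names here are assumptions inferred from the offsets in this
 * file. */
#if 0
struct regs_sketch
{
    uint32_t r[8];  /*  0-28: r4-r11; r[0]/r[1] double as the startup slots */
    uint32_t sp;    /* 32: stack pointer */
    uint32_t lr;    /* 36: link register */
    uint32_t start; /* 40: thread entry point, cleared once running */
};
#endif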

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia  %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr     r0, [%0, #40]          \n" /* Load start pointer */
        "cmp     r0, #0                 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc }         \n" /* If not already running, jump to start */
        "ldmia   %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}

#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
 || CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
 || CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
    asm volatile (
        "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
        "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
        : : "r"(0)
    );
    enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#ifndef CPU_PP
static inline void core_sleep(void)
{
    #warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CPU_PP */
#endif

diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
new file mode 100644
index 0000000000..20105ccb59
--- /dev/null
+++ b/firmware/target/arm/thread-pp.c
@@ -0,0 +1,540 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 by Daniel Ankers
 *
 * PP5002 and PP502x SoC threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#endif

#if NUM_CORES > 1
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */

#if CONFIG_CORELOCK == SW_CORELOCK
/* Software core locks using Peterson's mutual exclusion algorithm */

/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}

#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                              \n"
        "ldrb   r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp    r3, #0               \n" /* yes? lock acquired */
        "bxeq   lr                   \n"
        "ldrb   r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp    r3, r1               \n"
        "bxeq   lr                   \n" /* yes? lock acquired */
        "b      1b                   \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
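
/* Why "lsr #7" works as the myl[] index: PROCESSOR_ID reads 0x55 on the CPU
 * and 0xaa on the COP, so shifting the ID right by 7 yields 0 and 1
 * respectively, and id ^ 0xff yields the other core's ID. A sketch of the
 * same selection in C (illustrative only, not part of the original code): */
#if 0
static inline unsigned int corelock_slot(unsigned int id)
{
    return id >> 7; /* 0x55 -> 0 (CPU), 0xaa -> 1 (COP) */
}
#endif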

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r3, r0               \n"
        "strb   r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor    r2, r1, #0xff        \n" /* r2 = othercore */
        "strb   r2, [r0, #2]         \n" /* cl->turn = othercore */
        "ldrb   r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors   r0, r0, r2           \n" /* yes? lock acquired */
        "bxne   lr                   \n"
        "ldrb   r0, [r3, #2]         \n" /* || cl->turn == core? */
        "ands   r0, r0, r1           \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx     lr                   \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0;
    (void)cl;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov    r1, %0               \n" /* r1 = PROCESSOR_ID */
        "ldrb   r1, [r1]             \n"
        "mov    r2, #0               \n" /* cl->myl[core] = 0 */
        "strb   r2, [r0, r1, lsr #7] \n"
        "bx     lr                   \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
#else /* C versions for reference */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    for (;;)
    {
        if (cl->myl[othercore] == 0 || cl->turn == core)
            break;
    }
}

/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int othercore = 1 - core;

    cl->myl[core] = core;
    cl->turn = othercore;

    if (cl->myl[othercore] == 0 || cl->turn == core)
    {
        return 1;
    }

    cl->myl[core] = 0;
    return 0;
}

/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */

#endif /* CONFIG_CORELOCK == SW_CORELOCK */

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 *---------------------------------------------------------------------------
 */
#ifdef CPU_PP502x
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #4                     \n" /* r0 = 0x4 << core */
        "mov    r0, r0, lsl %[c]           \n"
        "str    r0, [%[mbx], #4]           \n" /* signal intent to sleep */
        "ldr    r1, [%[mbx], #0]           \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst    r1, r0, lsl #2             \n"
        "moveq  r1, #0x80000000            \n" /* Then sleep */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "moveq  r1, #0                     \n" /* Clear control reg */
        "streq  r1, [%[ctl], %[c], lsl #2] \n"
        "orr    r1, r0, r0, lsl #2         \n" /* Signal intent to wake - clear wake flag */
        "str    r1, [%[mbx], #8]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldr    r1, [%[mbx], #0]           \n"
        "tst    r1, r0, lsr #2             \n"
        "bne    1b                         \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov    r0, #1                     \n" /* Signal intent to sleep */
        "strb   r0, [%[sem], #2]           \n"
        "ldrb   r0, [%[sem], #1]           \n" /* && stay_awake == 0? */
        "cmp    r0, #0                     \n"
        "bne    2f                         \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst    pc, #0x0c                  \n"
        "mov    r0, #0xca                  \n"
        "strne  r0, [%[ctl], %[c], lsl #2] \n"
        "streq  r0, [%[ctl], %[c], lsl #2] \n"
        "nop                               \n" /* nop's needed because of pipeline */
        "nop                               \n"
        "nop                               \n"
    "2:                                    \n"
        "mov    r0, #0                     \n" /* Clear stay_awake and sleep intent */
        "strb   r0, [%[sem], #1]           \n"
        "strb   r0, [%[sem], #2]           \n"
    "1:                                    \n" /* Wait for wake procedure to finish */
        "ldrb   r0, [%[sem], #0]           \n"
        "cmp    r0, #0                     \n"
        "bne    1b                         \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */

/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                    \n" /* Disable IRQ */
        "orr    r1, r3, #0x80               \n"
        "msr    cpsr_c, r1                  \n"
        "mov    r2, #0x11                   \n" /* r2 = (0x11 << othercore) */
        "mov    r2, r2, lsl %[oc]           \n" /* Signal intent to wake othercore */
        "str    r2, [%[mbx], #4]            \n"
    "1:                                     \n" /* If it intends to sleep, let it first */
        "ldr    r1, [%[mbx], #0]            \n" /* (MBX_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor    r1, r1, #0xc                \n"
        "tst    r1, r2, lsr #2              \n"
        "ldr    r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq  r1, #0x80000000             \n"
        "beq    1b                          \n" /* Wait for sleep or wake */
        "tst    r1, #0x80000000             \n" /* If sleeping, wake it */
        "movne  r1, #0x0                    \n"
        "strne  r1, [%[ctl], %[oc], lsl #2] \n"
        "mov    r1, r2, lsr #4              \n"
        "str    r1, [%[mbx], #8]            \n" /* Done with wake procedure */
        "msr    cpsr_c, r3                  \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs    r3, cpsr                \n" /* Disable IRQ */
        "orr    r1, r3, #0x80           \n"
        "msr    cpsr_c, r1              \n"
        "mov    r1, #1                  \n" /* Signal intent to wake other core */
        "orr    r1, r1, r1, lsl #8      \n" /* and set stay_awake */
        "strh   r1, [%[sem], #0]        \n"
        "mov    r2, #0x8000             \n"
    "1:                                 \n" /* If it intends to sleep, let it first */
        "ldrb   r1, [%[sem], #2]        \n" /* intend_sleep != 0 ? */
        "cmp    r1, #1                  \n"
        "ldr    r1, [%[st]]             \n" /* && not sleeping ? */
        "tsteq  r1, r2, lsr %[oc]       \n"
        "beq    1b                      \n" /* Wait for sleep or wake */
        "tst    r1, r2, lsr %[oc]       \n"
        "ldrne  r2, =0xcf004054         \n" /* If sleeping, wake it */
        "movne  r1, #0xce               \n"
        "strne  r1, [r2, %[oc], lsl #2] \n"
        "mov    r1, #0                  \n" /* Done with wake procedure */
        "strb   r1, [%[sem], #0]        \n"
        "msr    cpsr_c, r3              \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */

#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}

/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after having
 * entered switch_thread, since switch_thread may not do a normal return: the
 * stack used for anything the compiler saved will not belong to the thread's
 * destination core, and it may have been recycled for other purposes by the
 * time a normal context load has taken place. switch_thread will also
 * clobber anything stashed in the thread's context or stored in the
 * non-volatile registers if it is saved there before the call, since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    cpucache_flush();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}

/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill behavior regarding
 * naked functions written in C, where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently
     * predictable. Stack access also isn't permitted until restoring the
     * original stack and context. */
    asm volatile (
        "stmfd  sp!, { r4-r11, lr }      \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks         \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2]     \n"
        "add    r2, r2, %0*4             \n"
        "stmfd  r2!, { sp }              \n" /* save original stack pointer on idle stack */
        "mov    sp, r2                   \n" /* switch stacks */
        "adr    r2, 1f                   \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]            \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread       \n" /* r0 = thread after call - see load_context */
    "1:                                  \n"
        "ldr    sp, [r0, #32]            \n" /* Reload original sp from context structure */
        "mov    r1, #0                   \n" /* Clear start address */
        "str    r1, [r0, #40]            \n"
        "ldr    r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
        "mov    lr, pc                   \n"
        "bx     r0                       \n"
        "ldmfd  sp!, { r4-r11, pc }      \n" /* Restore non-volatile context to new core and return */
        ".ltorg                          \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}

/*---------------------------------------------------------------------------
 * Do any device-specific inits for the threads and synchronize the kernel
 * initializations.
 *---------------------------------------------------------------------------
 */
static void core_thread_init(unsigned int core) INIT_ATTR;
static void core_thread_init(unsigned int core)
{
    if (core == CPU)
    {
        /* Wake up coprocessor and let it initialize kernel and threads */
#ifdef CPU_PP502x
        MBX_MSG_CLR = 0x3f;
#endif
        wake_core(COP);
        /* Sleep until COP has finished */
        sleep_core(CPU);
    }
    else
    {
        /* Wake the CPU and return */
        wake_core(CPU);
    }
}
#endif /* NUM_CORES */

diff --git a/firmware/target/coldfire/thread-coldfire.c b/firmware/target/coldfire/thread-coldfire.c
new file mode 100644
index 0000000000..f151a971c7
--- /dev/null
+++ b/firmware/target/coldfire/thread-coldfire.c
@@ -0,0 +1,97 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2004 by Linus Nielsen Feltzing
 *
 * Coldfire processor threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:            \n" /* Start here - no naked attribute */
        "move.l  %a0, %macsr  \n" /* Set initial mac status reg */
        "lea.l   48(%a1), %a1 \n"
        "move.l  (%a1)+, %sp  \n" /* Set initial stack */
        "move.l  (%a1), %a2   \n" /* Fetch thread function pointer */
        "clr.l   (%a1)        \n" /* Mark thread running */
        "jsr     (%a2)        \n" /* Call thread function */
    );
    thread_exit();
}

/* Set the EMAC unit to fractional mode with saturation for each new thread,
 * since that's the most useful mode for the DSP work codecs do. Codecs
 * should still initialize their preferred modes explicitly. The context
 * pointer is placed in the d2 slot and the start_thread pointer in the d3
 * slot; the thread function pointer is placed in context.start. See
 * load_context for what happens when the thread initially runs.
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (uint32_t)&(thread)->context,     \
       (thread)->context.d[1] = (uint32_t)start_thread,           \
       (thread)->context.start = (uint32_t)(function); })
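
/* The offsets used here and in load_context (sp at 48, start at 52) imply a
 * context layout along these lines. Illustrative sketch only: the real
 * structure is defined elsewhere, so the field names are assumptions
 * inferred from the movem.l register list below. */
#if 0
struct regs_sketch
{
    uint32_t macsr; /*  0: EMAC status register */
    uint32_t d[6];  /*  4-24: d2-d7; d[0]/d[1] double as the startup slots */
    uint32_t a[5];  /* 28-44: a2-a6 */
    uint32_t sp;    /* 48: stack pointer (a7) */
    uint32_t start; /* 52: thread entry point, cleared once running */
};
#endif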

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr, %%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7, (%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                   \n" /* Get start address */
        "beq.b   1f                             \n" /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2                \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                         \n" /* Start the thread */
    "1:                                         \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                  \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    /* Supervisor mode, interrupts enabled upon wakeup */
    asm volatile ("stop #0x2000");
}
diff --git a/firmware/target/mips/thread-mips32.c b/firmware/target/mips/thread-mips32.c
new file mode 100644
index 0000000000..e2fccb8022
--- /dev/null
+++ b/firmware/target/mips/thread-mips32.c
@@ -0,0 +1,133 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2008 by Maurus Cuelenaere
 *
 * 32-bit MIPS threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */

void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) _start_thread(void)
{
    /* t1 = context */
    asm volatile (
    "start_thread:           \n"
        ".set noreorder      \n"
        ".set noat           \n"
        "lw     $8,   4($9)  \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
        "lw     $29, 36($9)  \n" /* Set initial sp(=$29) */
        "jalr   $8           \n" /* Start the thread */
        "sw     $0,  44($9)  \n" /* Clear start address */
        ".set at             \n"
        ".set reorder        \n"
    );
    thread_exit();
}

/* Place the context pointer in the s0 slot, the function pointer in the s1
 * slot, and the start_thread pointer in context.start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function), \
       (thread)->context.start = (uint32_t)start_thread; })
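
/* Same idea as the other ports: the offsets in store_context/load_context
 * imply the layout below. Illustrative sketch only; the field names are
 * assumptions inferred from those offsets. */
#if 0
struct regs_sketch
{
    uint32_t r[8];  /*  0-28: s0-s7; r[0]/r[1] double as the startup slots */
    uint32_t fp;    /* 32: frame pointer */
    uint32_t sp;    /* 36: stack pointer */
    uint32_t ra;    /* 40: return address */
    uint32_t start; /* 44: thread entry point, cleared once running */
};
#endif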

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        ".set noreorder   \n"
        ".set noat        \n"
        "sw $16,  0(%0)   \n" /* s0 */
        "sw $17,  4(%0)   \n" /* s1 */
        "sw $18,  8(%0)   \n" /* s2 */
        "sw $19, 12(%0)   \n" /* s3 */
        "sw $20, 16(%0)   \n" /* s4 */
        "sw $21, 20(%0)   \n" /* s5 */
        "sw $22, 24(%0)   \n" /* s6 */
        "sw $23, 28(%0)   \n" /* s7 */
        "sw $30, 32(%0)   \n" /* fp */
        "sw $29, 36(%0)   \n" /* sp */
        "sw $31, 40(%0)   \n" /* ra */
        ".set at          \n"
        ".set reorder     \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        ".set noat           \n"
        ".set noreorder      \n"
        "lw    $8, 44(%0)    \n" /* Get start address ($8 = t0) */
        "beqz  $8, running   \n" /* NULL -> already running */
        "nop                 \n"
        "jr    $8            \n"
        "move  $9, %0        \n" /* t1 = context */
    "running:                \n"
        "lw    $16,  0(%0)   \n" /* s0 */
        "lw    $17,  4(%0)   \n" /* s1 */
        "lw    $18,  8(%0)   \n" /* s2 */
        "lw    $19, 12(%0)   \n" /* s3 */
        "lw    $20, 16(%0)   \n" /* s4 */
        "lw    $21, 20(%0)   \n" /* s5 */
        "lw    $22, 24(%0)   \n" /* s6 */
        "lw    $23, 28(%0)   \n" /* s7 */
        "lw    $30, 32(%0)   \n" /* fp */
        "lw    $29, 36(%0)   \n" /* sp */
        "lw    $31, 40(%0)   \n" /* ra */
        ".set at             \n"
        ".set reorder        \n"
        : : "r" (addr) : "t0", "t1"
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
#if CONFIG_CPU == JZ4732
    __cpm_idle_mode();
#endif
    asm volatile (
        ".set mips32r2         \n"
        "mfc0  $8, $12         \n" /* mfc0 t0, $12 */
        "move  $9, $8          \n" /* move t1, t0 */
        "la    $10, 0x8000000  \n" /* la t2, 0x8000000 */
        "or    $8, $8, $10     \n" /* Enable reduced power mode */
        "mtc0  $8, $12         \n" /* mtc0 t0, $12 */
        "wait                  \n"
        "mtc0  $9, $12         \n" /* mtc0 t1, $12 */
        ".set mips0            \n"
        ::: "t0", "t1", "t2"
    );
    enable_irq();
}
diff --git a/firmware/target/sh/thread-sh.c b/firmware/target/sh/thread-sh.c
new file mode 100644
index 0000000000..25e0aadf96
--- /dev/null
+++ b/firmware/target/sh/thread-sh.c
@@ -0,0 +1,109 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /|  __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * SH processor threading support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
    );
    thread_exit();
}

/* Place the context pointer in the r8 slot, the function pointer in the r9
 * slot, and the start_thread pointer in context.start */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function), \
       (thread)->context.start = (uint32_t)start_thread; })
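
/* Here the stack pointer is simply r15 inside the register block. The layout
 * implied by the offsets used in this file (illustrative sketch only; names
 * are assumptions): */
#if 0
struct regs_sketch
{
    uint32_t r[8];  /*  0-28: r8-r15 (r15 is the sp, hence sp at offset 28) */
    uint32_t pr;    /* 32: procedure return register */
    uint32_t start; /* 36: thread entry point, cleared once running */
};
#endif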

/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36, %0   \n" /* Start at last reg. By the time routine */
        "sts.l   pr, @-%0  \n" /* is done, %0 will have the original value */
        "mov.l   r15, @-%0 \n"
        "mov.l   r14, @-%0 \n"
        "mov.l   r13, @-%0 \n"
        "mov.l   r12, @-%0 \n"
        "mov.l   r11, @-%0 \n"
        "mov.l   r10, @-%0 \n"
        "mov.l   r9,  @-%0 \n"
        "mov.l   r8,  @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running:                \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR - GBR) : "r1");
}