diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2010-06-02 12:45:36 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2010-06-02 12:45:36 +0000 |
commit | 555ad6710fd897bfc12549197b606c90b06000b4 (patch) | |
tree | f0c578888b88e29260793f083361b6d2fd4c2d93 | |
parent | 6ebe76c147b00d2decd9501ad45ab7fd6db5b9c0 (diff) | |
download | rockbox-555ad6710fd897bfc12549197b606c90b06000b4.tar.gz rockbox-555ad6710fd897bfc12549197b606c90b06000b4.zip |
Threading: Split processor support code into respective target files. C files from /target/xxx are included into thread.c because of essential inlining and files are code, not declarations. Copyrights in each new file go to whoever implemented the first functional support.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26479 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r-- | firmware/target/arm/thread-arm.c | 112 | ||||
-rw-r--r-- | firmware/target/arm/thread-pp.c | 540 | ||||
-rw-r--r-- | firmware/target/coldfire/thread-coldfire.c | 97 | ||||
-rw-r--r-- | firmware/target/mips/thread-mips32.c | 133 | ||||
-rw-r--r-- | firmware/target/sh/thread-sh.c | 109 | ||||
-rw-r--r-- | firmware/thread.c | 891 |
6 files changed, 1004 insertions, 878 deletions
diff --git a/firmware/target/arm/thread-arm.c b/firmware/target/arm/thread-arm.c new file mode 100644 index 0000000000..c2d91cec25 --- /dev/null +++ b/firmware/target/arm/thread-arm.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2005 by Thom Johansen | ||
11 | * | ||
12 | * Generic ARM threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns.
 *
 * Entered the first time a thread is scheduled: load_context jumps here with
 * r0 = &thread->context (see THREAD_STARTUP_INIT).  Context offsets used
 * below: #32 is context.sp (after r[0..7], 8*4 bytes), #40 is context.start
 * holding the thread function pointer; clearing it marks the thread as
 * running so later load_context calls take the normal restore path.
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr sp, [r0, #32] \n" /* Load initial sp */
        "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
        "mov r1, #0 \n" /* Mark thread as running */
        "str r1, [r0, #40] \n"
#if NUM_CORES > 1
        "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
        "mov lr, pc \n" /* This could be the first entry into */
        "bx r0 \n" /* plugin or codec code for this core. */
#endif
        "mov lr, pc \n" /* Call thread function */
        "bx r4 \n"
    ); /* No clobber list - new thread doesn't care */
    /* If the thread function ever returns, terminate the thread */
    thread_exit();
#if 0
    asm volatile (".ltorg"); /* Dump constant pool */
#endif
}
49 | |||
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run.
 *
 * load_context's "ldmneia %0, { r0, pc }" pops r[0] into r0 and r[1] into
 * pc, i.e. it enters start_thread with r0 = &context.  The 'core' argument
 * is unused on this target but kept for a uniform macro signature. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)function; })
57 | |||
58 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *
 * Saves r4-r11, sp and lr (ten words) into the context block at 'addr';
 * the AAPCS-volatile registers are the caller's responsibility.  Layout
 * must match load_context and the #32/#40 offsets used elsewhere.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}
70 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *
 * If context.start (offset #40) is non-NULL the thread has never run:
 * the ldmne loads r[0] into r0 and r[1] (= start_thread) into pc, entering
 * start_thread with r0 = context pointer.  Otherwise the normal restore of
 * r4-r11, sp, lr resumes the thread after its last store_context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr r0, [%0, #40] \n" /* Load start pointer */
        "cmp r0, #0 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
        "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
85 | |||
#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
    || CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
    || CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
    /* cp15 c7,c0,4 halts the core until an interrupt is pending; execution
     * resumes here on wakeup. */
    asm volatile (
        "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
        "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
        : : "r"(0)
    );
    /* Re-enable IRQs so the wakeup source can actually be serviced */
    enable_irq();
}
#else
/* Skip this if special code is required and implemented */
#ifndef CPU_PP
/* Fallback: no low-power wait for this target yet - just keep IRQs on. */
static inline void core_sleep(void)
{
#warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CPU_PP */
#endif
111 | |||
112 | |||
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c new file mode 100644 index 0000000000..20105ccb59 --- /dev/null +++ b/firmware/target/arm/thread-pp.c | |||
@@ -0,0 +1,540 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2007 by Daniel Ankers | ||
11 | * | ||
12 | * PP5002 and PP502x SoC threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Support a special workaround object for large-sector disks */
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#endif

#if NUM_CORES > 1
/* Idle-stack bounds; defined elsewhere (presumably the linker script /
 * startup code - TODO confirm). */
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
/* Per-core idle stack base, indexed by core number (CPU/COP) */
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

/* One semaphore set per core; byte offsets above are relied upon by the
 * inline asm in core_sleep/core_wake. */
static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */
54 | |||
55 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
56 | /* Software core locks using Peterson's mutual exclusion algorithm */ | ||
57 | |||
58 | /*--------------------------------------------------------------------------- | ||
59 | * Initialize the corelock structure. | ||
60 | *--------------------------------------------------------------------------- | ||
61 | */ | ||
62 | void corelock_init(struct corelock *cl) | ||
63 | { | ||
64 | memset(cl, 0, sizeof (*cl)); | ||
65 | } | ||
66 | |||
67 | #if 1 /* Assembly locks to minimize overhead */ | ||
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *
 * Peterson's algorithm in asm.  Function is naked: r0 = cl on entry and the
 * acquired paths return directly via bx lr.  The core ID byte read from
 * PROCESSOR_ID shifted right by 7 yields the 0/1 index into cl->myl.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff \n" /* r2 = othercore */
        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
        "1: \n"
        "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp r3, #0 \n" /* yes? lock acquired */
        "bxeq lr \n"
        "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
        "cmp r3, r1 \n"
        "bxeq lr \n" /* yes? lock acquired */
        "b 1b \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
94 | |||
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *
 * Naked function: r0 = cl on entry and also carries the return value
 * (non-zero on success).  Single test of Peterson's condition - no spin.
 * The trailing C "return 0" is unreachable; it only placates the compiler.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "mov r3, r0 \n"
        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff \n" /* r2 = othercore */
        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
        "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors r0, r0, r2 \n" /* yes? lock acquired */
        "bxne lr \n"
        "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
        "ands r0, r0, r1 \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx lr \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0;
    (void)cl;
}
123 | |||
/*---------------------------------------------------------------------------
 * Release ownership of the corelock.
 *
 * Naked function: r0 = cl on entry.  Simply clears this core's interest
 * byte (cl->myl[core] = 0), which lets the other core's spin in
 * corelock_lock proceed.
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "mov r2, #0 \n" /* cl->myl[core] = 0 */
        "strb r2, [r0, r1, lsr #7] \n"
        "bx lr \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
141 | #else /* C versions for reference */ | ||
142 | /*--------------------------------------------------------------------------- | ||
143 | * Wait for the corelock to become free and aquire it when it does. | ||
144 | *--------------------------------------------------------------------------- | ||
145 | */ | ||
146 | void corelock_lock(struct corelock *cl) | ||
147 | { | ||
148 | const unsigned int core = CURRENT_CORE; | ||
149 | const unsigned int othercore = 1 - core; | ||
150 | |||
151 | cl->myl[core] = core; | ||
152 | cl->turn = othercore; | ||
153 | |||
154 | for (;;) | ||
155 | { | ||
156 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
157 | break; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | /*--------------------------------------------------------------------------- | ||
162 | * Try to aquire the corelock. If free, caller gets it, otherwise return 0. | ||
163 | *--------------------------------------------------------------------------- | ||
164 | */ | ||
165 | int corelock_try_lock(struct corelock *cl) | ||
166 | { | ||
167 | const unsigned int core = CURRENT_CORE; | ||
168 | const unsigned int othercore = 1 - core; | ||
169 | |||
170 | cl->myl[core] = core; | ||
171 | cl->turn = othercore; | ||
172 | |||
173 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
174 | { | ||
175 | return 1; | ||
176 | } | ||
177 | |||
178 | cl->myl[core] = 0; | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | /*--------------------------------------------------------------------------- | ||
183 | * Release ownership of the corelock | ||
184 | *--------------------------------------------------------------------------- | ||
185 | */ | ||
186 | void corelock_unlock(struct corelock *cl) | ||
187 | { | ||
188 | cl->myl[CURRENT_CORE] = 0; | ||
189 | } | ||
190 | #endif /* ASM / C selection */ | ||
191 | |||
192 | #endif /* CONFIG_CORELOCK == SW_CORELOCK */ | ||
193 | |||
194 | /*--------------------------------------------------------------------------- | ||
195 | * Put core in a power-saving state if waking list wasn't repopulated and if | ||
196 | * no other core requested a wakeup for it to perform a task. | ||
197 | *--------------------------------------------------------------------------- | ||
198 | */ | ||
#ifdef CPU_PP502x
#if NUM_CORES == 1
/* Single-core: sleep and re-enable IRQs on wakeup - no handshake needed. */
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* Multi-core: announce intent via the PP502x mailbox so the other core can
 * veto the sleep (see core_wake), then handshake the wake procedure. */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov r0, #4 \n" /* r0 = 0x4 << core */
        "mov r0, r0, lsl %[c] \n"
        "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
        "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst r1, r0, lsl #2 \n"
        "moveq r1, #0x80000000 \n" /* Then sleep */
        "streq r1, [%[ctl], %[c], lsl #2] \n"
        "moveq r1, #0 \n" /* Clear control reg */
        "streq r1, [%[ctl], %[c], lsl #2] \n"
        "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
        "str r1, [%[mbx], #8] \n"
        "1: \n" /* Wait for wake procedure to finish */
        "ldr r1, [%[mbx], #0] \n"
        "tst r1, r0, lsr #2 \n"
        "bne 1b \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
249 | #elif CONFIG_CPU == PP5002 | ||
250 | #if NUM_CORES == 1 | ||
251 | static inline void core_sleep(void) | ||
252 | { | ||
253 | sleep_core(CURRENT_CORE); | ||
254 | enable_irq(); | ||
255 | } | ||
256 | /* PP5002 has no mailboxes - emulate using bytes */ | ||
257 | static inline void core_sleep(unsigned int core) | ||
258 | { | ||
259 | #if 1 | ||
260 | asm volatile ( | ||
261 | "mov r0, #1 \n" /* Signal intent to sleep */ | ||
262 | "strb r0, [%[sem], #2] \n" | ||
263 | "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */ | ||
264 | "cmp r0, #0 \n" | ||
265 | "bne 2f \n" | ||
266 | /* Sleep: PP5002 crashes if the instruction that puts it to sleep is | ||
267 | * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure | ||
268 | * that the correct alternative is executed. Don't change the order | ||
269 | * of the next 4 instructions! */ | ||
270 | "tst pc, #0x0c \n" | ||
271 | "mov r0, #0xca \n" | ||
272 | "strne r0, [%[ctl], %[c], lsl #2] \n" | ||
273 | "streq r0, [%[ctl], %[c], lsl #2] \n" | ||
274 | "nop \n" /* nop's needed because of pipeline */ | ||
275 | "nop \n" | ||
276 | "nop \n" | ||
277 | "2: \n" | ||
278 | "mov r0, #0 \n" /* Clear stay_awake and sleep intent */ | ||
279 | "strb r0, [%[sem], #1] \n" | ||
280 | "strb r0, [%[sem], #2] \n" | ||
281 | "1: \n" /* Wait for wake procedure to finish */ | ||
282 | "ldrb r0, [%[sem], #0] \n" | ||
283 | "cmp r0, #0 \n" | ||
284 | "bne 1b \n" | ||
285 | : | ||
286 | : [sem]"r"(&core_semaphores[core]), [c]"r"(core), | ||
287 | [ctl]"r"(&CPU_CTL) | ||
288 | : "r0" | ||
289 | ); | ||
290 | #else /* C version for reference */ | ||
291 | /* Signal intent to sleep */ | ||
292 | core_semaphores[core].intend_sleep = 1; | ||
293 | |||
294 | /* Something waking or other processor intends to wake us? */ | ||
295 | if (core_semaphores[core].stay_awake == 0) | ||
296 | { | ||
297 | sleep_core(core); | ||
298 | } | ||
299 | |||
300 | /* Signal wake - clear wake flag */ | ||
301 | core_semaphores[core].stay_awake = 0; | ||
302 | core_semaphores[core].intend_sleep = 0; | ||
303 | |||
304 | /* Wait for other processor to finish wake procedure */ | ||
305 | while (core_semaphores[core].intend_wake != 0); | ||
306 | |||
307 | /* Enable IRQ */ | ||
308 | #endif /* ASM/C selection */ | ||
309 | enable_irq(); | ||
310 | } | ||
311 | #endif /* NUM_CORES */ | ||
312 | #endif /* PP CPU type */ | ||
313 | |||
/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *
 * Counterpart of core_sleep's intent/stay-awake handshake: sets the
 * stay-awake flag first, waits for the other core to either commit to
 * sleeping or abort, wakes it if it slept, then clears the wake intent.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs r3, cpsr \n" /* Disable IRQ */
        "orr r1, r3, #0x80 \n"
        "msr cpsr_c, r1 \n"
        "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
        "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
        "str r2, [%[mbx], #4] \n"
        "1: \n" /* If it intends to sleep, let it first */
        "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor r1, r1, #0xc \n"
        "tst r1, r2, lsr #2 \n"
        "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq r1, #0x80000000 \n"
        "beq 1b \n" /* Wait for sleep or wake */
        "tst r1, #0x80000000 \n" /* If sleeping, wake it */
        "movne r1, #0x0 \n"
        "strne r1, [%[ctl], %[oc], lsl #2] \n"
        "mov r1, r2, lsr #4 \n"
        "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
        "msr cpsr_c, r3 \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#elif CONFIG_CPU == PP5002
/* PP5002 has no mailboxes - emulate using bytes */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs r3, cpsr \n" /* Disable IRQ */
        "orr r1, r3, #0x80 \n"
        "msr cpsr_c, r1 \n"
        "mov r1, #1 \n" /* Signal intent to wake other core */
        "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
        "strh r1, [%[sem], #0] \n"
        "mov r2, #0x8000 \n"
        "1: \n" /* If it intends to sleep, let it first */
        "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
        "cmp r1, #1 \n"
        "ldr r1, [%[st]] \n" /* && not sleeping ? */
        "tsteq r1, r2, lsr %[oc] \n"
        "beq 1b \n" /* Wait for sleep or wake */
        "tst r1, r2, lsr %[oc] \n"
        "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
        "movne r1, #0xce \n"
        "strne r1, [r2, %[oc], lsl #2] \n"
        "mov r1, #0 \n" /* Done with wake procedure */
        "strb r1, [%[sem], #0] \n"
        "msr cpsr_c, r3 \n" /* Restore IRQ */
        :
        : [sem]"r"(&core_semaphores[othercore]),
          [st]"r"(&PROC_STAT),
          [oc]"r"(othercore)
        : "r1", "r2", "r3"
    );
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    core_semaphores[othercore].intend_wake = 1;
    core_semaphores[othercore].stay_awake = 1;

    /* If it intends to sleep, wait until it does or aborts */
    while (core_semaphores[othercore].intend_sleep != 0 &&
           (PROC_STAT & PROC_SLEEPING(othercore)) == 0);

    /* If sleeping, wake it up */
    if (PROC_STAT & PROC_SLEEPING(othercore))
        wake_core(othercore);

    /* Done with wake procedure */
    core_semaphores[othercore].intend_wake = 0;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
#endif /* CPU type */
429 | |||
430 | #if NUM_CORES > 1 | ||
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *
 * The old sp is saved into the top word of the idle stack before sp is
 * repointed there, so it could in principle be recovered.
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov sp, %0 \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
448 | |||
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place while before changing the processor and after
 * having entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 *
 * The r[0]/r[1] stashes mirror THREAD_STARTUP_INIT so that load_context's
 * startup path re-enters the thread at context.start on the new core.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    cpucache_flush();
    /* Stash thread in r4 slot */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
472 | |||
/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 *
 * On entry r0 = core (destination), r1 = thread.  The departing core saves
 * its non-volatile registers, switches to the destination core's idle
 * stack, points thread->context.start at label 1 and tail-calls
 * switch_thread; when the destination core later load_context's this
 * thread it resumes at label 1, restores sp from the context (offset #32,
 * set by core_switch_blk_op), invalidates its cache and returns normally.
 */
static void __attribute__((naked))
switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
        "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr r2, [r2, r0, lsl #2] \n"
        "add r2, r2, %0*4 \n"
        "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
        "mov sp, r2 \n" /* switch stacks */
        "adr r2, 1f \n" /* r2 = new core restart address */
        "str r2, [r1, #40] \n" /* thread->context.start = r2 */
        "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
        "1: \n"
        "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
        "mov r1, #0 \n" /* Clear start address */
        "str r1, [r0, #40] \n"
        "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
        "mov lr, pc \n"
        "bx r0 \n"
        "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
        ".ltorg \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
514 | |||
515 | /*--------------------------------------------------------------------------- | ||
516 | * Do any device-specific inits for the threads and synchronize the kernel | ||
517 | * initializations. | ||
518 | *--------------------------------------------------------------------------- | ||
519 | */ | ||
520 | static void core_thread_init(unsigned int core) INIT_ATTR; | ||
521 | static void core_thread_init(unsigned int core) | ||
522 | { | ||
523 | if (core == CPU) | ||
524 | { | ||
525 | /* Wake up coprocessor and let it initialize kernel and threads */ | ||
526 | #ifdef CPU_PP502x | ||
527 | MBX_MSG_CLR = 0x3f; | ||
528 | #endif | ||
529 | wake_core(COP); | ||
530 | /* Sleep until COP has finished */ | ||
531 | sleep_core(CPU); | ||
532 | } | ||
533 | else | ||
534 | { | ||
535 | /* Wake the CPU and return */ | ||
536 | wake_core(CPU); | ||
537 | } | ||
538 | } | ||
539 | #endif /* NUM_CORES */ | ||
540 | |||
diff --git a/firmware/target/coldfire/thread-coldfire.c b/firmware/target/coldfire/thread-coldfire.c new file mode 100644 index 0000000000..f151a971c7 --- /dev/null +++ b/firmware/target/coldfire/thread-coldfire.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2004 by Linus Nielsen Feltzing | ||
11 | * | ||
12 | * Coldfire processor threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
24 | /*--------------------------------------------------------------------------- | ||
25 | * Start the thread running and terminate it if it returns | ||
26 | *--------------------------------------------------------------------------- | ||
27 | */ | ||
void start_thread(void); /* Provide C access to ASM label */
/* Trampoline that every new thread begins in.  load_context jumps to the
 * "start_thread" asm label with a0 = initial macsr and a1 = the thread's
 * context block (see THREAD_STARTUP_INIT).  The initial stack pointer and
 * thread function are fetched from the context; if the thread function
 * ever returns, the thread is terminated via thread_exit(). */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread: \n" /* Start here - no naked attribute */
        "move.l %a0, %macsr \n" /* Set initial mac status reg */
        "lea.l 48(%a1), %a1 \n" /* Skip the 12 saved regs + macsr (48 bytes) */
        "move.l (%a1)+, %sp \n" /* Set initial stack */
        "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
        "clr.l (%a1) \n" /* Mark thread running - context.start = 0 */
        "jsr (%a2) \n" /* Call thread function */
    );
    thread_exit();
}
43 | |||
/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 *
 * NOTE(review): context.d[] holds d2-d7 (store_context saves
 * d0/d2-d7/a2-a7), so d[0] is the "d2 slot" and d[1] the "d3 slot"
 * referred to above. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (uint32_t)&(thread)->context, \
       (thread)->context.d[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)(function); })
57 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 * Saves macsr (via a d0 scratch copy) followed by the callee-saved
 * registers d2-d7/a2-a7 - 13 longwords starting at addr.  Layout must
 * match load_context and THREAD_STARTUP_INIT.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l %%macsr,%%d0 \n" /* macsr is not directly storable by movem */
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
70 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 * If context.start (offset 52) is non-zero the thread has never run:
 * branch to start_thread with a0=macsr, a1=context.  Otherwise restore
 * the registers saved by store_context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l 52(%0), %%d0 \n" /* Get start address */
        "beq.b 1f \n" /* NULL -> already running (move.l set CCR) */
        "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
        "jmp (%%a2) \n" /* Start the thread */
        "1: \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l %%d0, %%macsr \n" /* Restore mac status through d0 */
        : : "a" (addr) : "d0" /* only! */
    );
}
88 | |||
89 | /*--------------------------------------------------------------------------- | ||
90 | * Put core in a power-saving state if waking list wasn't repopulated. | ||
91 | *--------------------------------------------------------------------------- | ||
92 | */ | ||
93 | static inline void core_sleep(void) | ||
94 | { | ||
95 | /* Supervisor mode, interrupts enabled upon wakeup */ | ||
96 | asm volatile ("stop #0x2000"); | ||
97 | }; | ||
diff --git a/firmware/target/mips/thread-mips32.c b/firmware/target/mips/thread-mips32.c new file mode 100644 index 0000000000..e2fccb8022 --- /dev/null +++ b/firmware/target/mips/thread-mips32.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2008 by Maurus Cuelenaere | ||
11 | * | ||
12 | * 32-bit MIPS threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
24 | /*--------------------------------------------------------------------------- | ||
25 | * Start the thread running and terminate it if it returns | ||
26 | *--------------------------------------------------------------------------- | ||
27 | */ | ||
28 | |||
29 | void start_thread(void); /* Provide C access to ASM label */ | ||
30 | static void __attribute__((used)) _start_thread(void) | ||
31 | { | ||
32 | /* t1 = context */ | ||
33 | asm volatile ( | ||
34 | "start_thread: \n" | ||
35 | ".set noreorder \n" | ||
36 | ".set noat \n" | ||
37 | "lw $8, 4($9) \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */ | ||
38 | "lw $29, 36($9) \n" /* Set initial sp(=$29) */ | ||
39 | "jalr $8 \n" /* Start the thread */ | ||
40 | "sw $0, 44($9) \n" /* Clear start address */ | ||
41 | ".set at \n" | ||
42 | ".set reorder \n" | ||
43 | ); | ||
44 | thread_exit(); | ||
45 | } | ||
46 | |||
/* Place context pointer in s0 slot, function pointer in s1 slot, and
 * start_thread pointer in context_start.
 * NOTE(review): context.r[] holds s0-s7 (see store_context), so r[0] is
 * the s0 slot and r[1] the s1 slot. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function), \
       (thread)->context.start = (uint32_t)start_thread; })
53 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 * Saves the MIPS callee-saved registers s0-s7, fp, sp and ra as 11
 * words starting at addr; layout must match load_context and the
 * offsets used by start_thread.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        ".set noreorder \n"
        ".set noat \n"
        "sw $16, 0(%0) \n" /* s0 */
        "sw $17, 4(%0) \n" /* s1 */
        "sw $18, 8(%0) \n" /* s2 */
        "sw $19, 12(%0) \n" /* s3 */
        "sw $20, 16(%0) \n" /* s4 */
        "sw $21, 20(%0) \n" /* s5 */
        "sw $22, 24(%0) \n" /* s6 */
        "sw $23, 28(%0) \n" /* s7 */
        "sw $30, 32(%0) \n" /* fp */
        "sw $29, 36(%0) \n" /* sp */
        "sw $31, 40(%0) \n" /* ra */
        ".set at \n"
        ".set reorder \n"
        : : "r" (addr)
    );
}
79 | |||
80 | /*--------------------------------------------------------------------------- | ||
81 | * Load non-volatile context. | ||
82 | *--------------------------------------------------------------------------- | ||
83 | */ | ||
84 | static inline void load_context(const void* addr) | ||
85 | { | ||
86 | asm volatile ( | ||
87 | ".set noat \n" | ||
88 | ".set noreorder \n" | ||
89 | "lw $8, 44(%0) \n" /* Get start address ($8 = t0) */ | ||
90 | "beqz $8, running \n" /* NULL -> already running */ | ||
91 | "nop \n" | ||
92 | "jr $8 \n" | ||
93 | "move $9, %0 \n" /* t1 = context */ | ||
94 | "running: \n" | ||
95 | "lw $16, 0(%0) \n" /* s0 */ | ||
96 | "lw $17, 4(%0) \n" /* s1 */ | ||
97 | "lw $18, 8(%0) \n" /* s2 */ | ||
98 | "lw $19, 12(%0) \n" /* s3 */ | ||
99 | "lw $20, 16(%0) \n" /* s4 */ | ||
100 | "lw $21, 20(%0) \n" /* s5 */ | ||
101 | "lw $22, 24(%0) \n" /* s6 */ | ||
102 | "lw $23, 28(%0) \n" /* s7 */ | ||
103 | "lw $30, 32(%0) \n" /* fp */ | ||
104 | "lw $29, 36(%0) \n" /* sp */ | ||
105 | "lw $31, 40(%0) \n" /* ra */ | ||
106 | ".set at \n" | ||
107 | ".set reorder \n" | ||
108 | : : "r" (addr) : "t0", "t1" | ||
109 | ); | ||
110 | } | ||
111 | |||
/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 * Sets bit 27 (0x8000000) of the CP0 Status register ($12) to enable
 * reduced power mode, executes WAIT, then restores the previous Status
 * value and re-enables interrupts.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
#if CONFIG_CPU == JZ4732
    __cpm_idle_mode();
#endif
    asm volatile(".set mips32r2 \n"
                 "mfc0 $8, $12 \n" /* mfc t0, $12 */
                 "move $9, $8 \n" /* move t1, t0 - keep old Status */
                 "la $10, 0x8000000 \n" /* la t2, 0x8000000 */
                 "or $8, $8, $10 \n" /* Enable reduced power mode */
                 "mtc0 $8, $12 \n" /* mtc t0, $12 */
                 "wait \n"
                 "mtc0 $9, $12 \n" /* mtc t1, $12 - restore old Status */
                 ".set mips0 \n"
                 ::: "t0", "t1", "t2"
                 );
    enable_irq();
}
diff --git a/firmware/target/sh/thread-sh.c b/firmware/target/sh/thread-sh.c new file mode 100644 index 0000000000..25e0aadf96 --- /dev/null +++ b/firmware/target/sh/thread-sh.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Ulf Ralberg | ||
11 | * | ||
12 | * SH processor threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
/* Trampoline every new thread begins in.  load_context jumps to the
 * "_start_thread" asm label (SH ABI prepends an underscore to C symbols -
 * presumably why the label differs from the C name; confirm against the
 * toolchain) with r8 = the thread's context block.  Stack and thread
 * function are fetched from it; the thread terminates if the function
 * returns. */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
        "_start_thread: \n" /* Start here - no naked attribute */
        "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
        "mov.l @(28, r8), r15 \n" /* Set initial sp */
        "mov #0, r1 \n" /* Start the thread */
        "jsr @r0 \n"
        "mov.l r1, @(36, r8) \n" /* Clear start address (delay slot) */
    );
    thread_exit();
}
42 | |||
/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start.
 * NOTE(review): context.r[] holds r8-r15 (see store_context), so r[0] is
 * the r8 slot and r[1] the r9 slot. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function), \
       (thread)->context.start = (uint32_t)start_thread; })
49 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 * Saves r8-r15 and pr (10 words) with pre-decrement stores working down
 * from addr+36, which leaves %0 back at its original value afterwards.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add #36, %0 \n" /* Start at last reg. By the time routine */
        "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
        "mov.l r15,@-%0 \n"
        "mov.l r14,@-%0 \n"
        "mov.l r13,@-%0 \n"
        "mov.l r12,@-%0 \n"
        "mov.l r11,@-%0 \n"
        "mov.l r10,@-%0 \n"
        "mov.l r9, @-%0 \n"
        "mov.l r8, @-%0 \n"
        : : "r" (addr)
    );
}
70 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 * If context.start (offset 36) is non-zero the thread has never run:
 * jump to it (_start_thread) with r8 = context loaded in the delay slot.
 * Otherwise restore r8-r15 and pr as saved by store_context.
 *
 * NOTE(review): ".running" is a plain assembler symbol, not a local
 * label - if the compiler ever expands this static inline function in
 * more than one place, assembly would fail with a duplicate label.
 * Consider a numeric local label; confirm behavior with the SH gas port.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l @(36, %0), r0 \n" /* Get start address */
        "tst r0, r0 \n"
        "bt .running \n" /* NULL -> already running */
        "jmp @r0 \n" /* r8 = context */
        ".running: \n"
        "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
        "mov.l @%0+, r9 \n"
        "mov.l @%0+, r10 \n"
        "mov.l @%0+, r11 \n"
        "mov.l @%0+, r12 \n"
        "mov.l @%0+, r13 \n"
        "mov.l @%0+, r14 \n"
        "mov.l @%0+, r15 \n"
        "lds.l @%0+, pr \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
95 | |||
/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 * Clears SBY in SBYCR so SLEEP enters sleep (not standby) mode, enables
 * interrupts by clearing SR, and executes SLEEP; the instruction after
 * ldc r1, sr cannot be interrupted so the core always reaches SLEEP.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov #0, r1 \n" /* Enable interrupts */
        "ldc r1, sr \n" /* Following instruction cannot be interrupted */
        "sleep \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}
109 | |||
diff --git a/firmware/thread.c b/firmware/thread.c index 54d966ffe5..5cad67b657 100644 --- a/firmware/thread.c +++ b/firmware/thread.c | |||
@@ -160,892 +160,27 @@ void switch_thread(void) | |||
160 | __attribute__((noinline)); | 160 | __attribute__((noinline)); |
161 | 161 | ||
162 | /**************************************************************************** | 162 | /**************************************************************************** |
163 | * Processor-specific section | 163 | * Processor-specific section - include necessary core support |
164 | */ | 164 | */ |
165 | |||
166 | #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64 | ||
167 | /* Support a special workaround object for large-sector disks */ | ||
168 | #define IF_NO_SKIP_YIELD(...) __VA_ARGS__ | ||
169 | #else | ||
170 | #define IF_NO_SKIP_YIELD(...) | ||
171 | #endif | ||
172 | |||
173 | #if defined(CPU_ARM) | 165 | #if defined(CPU_ARM) |
174 | /*--------------------------------------------------------------------------- | 166 | #include "thread-arm.c" |
175 | * Start the thread running and terminate it if it returns | ||
176 | *--------------------------------------------------------------------------- | ||
177 | */ | ||
/* Trampoline every new ARM thread begins in.  load_context jumps here with
 * r0 = the thread's context block; the initial sp and the thread function
 * (context.start, offset 40) are fetched from it.  Naked: no prologue, the
 * context provides all state.  Terminates the thread if its function
 * returns. */
static void __attribute__((naked,used)) start_thread(void)
{
    /* r0 = context */
    asm volatile (
        "ldr sp, [r0, #32] \n" /* Load initial sp */
        "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
        "mov r1, #0 \n" /* Mark thread as running */
        "str r1, [r0, #40] \n"
#if NUM_CORES > 1
        "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
        "mov lr, pc \n" /* This could be the first entry into */
        "bx r0 \n" /* plugin or codec code for this core. */
#endif
        "mov lr, pc \n" /* Call thread function */
        "bx r4 \n"
    ); /* No clobber list - new thread doesn't care */
    thread_exit();
    //asm volatile (".ltorg"); /* Dump constant pool */
}
197 | |||
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
 * slot, and thread function pointer in context.start. See load_context for
 * what happens when thread is initially going to run.
 * NOTE(review): context.r[] holds r4-r11, so r[0]/r[1] are the r4/r5 slots
 * loaded by load_context's "ldmneia %0, { r0, pc }". */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)start_thread, \
       (thread)->context.start = (uint32_t)function; })
205 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 * Saves the ARM callee-saved registers r4-r11 plus sp and lr (10 words)
 * starting at addr; layout must match load_context and start_thread.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia %0, { r4-r11, sp, lr } \n"
        : : "r" (addr)
    );
}
217 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 * If context.start (offset 40) is non-NULL the thread has never run:
 * the ldm loads r0 = context and pc = start_thread in one instruction.
 * Otherwise restore r4-r11, sp and lr as saved by store_context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldr r0, [%0, #40] \n" /* Load start pointer */
        "cmp r0, #0 \n" /* Check for NULL */
        "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
        "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
        : : "r" (addr) : "r0" /* only! */
    );
}
232 | |||
233 | #if defined (CPU_PP) | 167 | #if defined (CPU_PP) |
234 | 168 | #include "thread-pp.c" | |
235 | #if NUM_CORES > 1 | 169 | #endif /* CPU_PP */ |
/* Per-core idle stacks used while a core runs without a thread context
 * (e.g. during core_switch); presumably defined in the PP startup code or
 * linker script - confirm against crt0-pp. */
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
{
    [CPU] = cpu_idlestackbegin,
    [COP] = cop_idlestackbegin
};

#if CONFIG_CPU == PP5002
/* Bytes to emulate the PP502x mailbox bits */
struct core_semaphores
{
    volatile uint8_t intend_wake;  /* 00h */
    volatile uint8_t stay_awake;   /* 01h */
    volatile uint8_t intend_sleep; /* 02h */
    volatile uint8_t unused;       /* 03h */
};

/* One emulated mailbox per core; IBSS_ATTR keeps it in fast IRAM bss */
static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
#endif /* CONFIG_CPU == PP5002 */

#endif /* NUM_CORES */
260 | |||
261 | #if CONFIG_CORELOCK == SW_CORELOCK | ||
262 | /* Software core locks using Peterson's mutual exclusion algorithm */ | ||
263 | |||
/*---------------------------------------------------------------------------
 * Initialize the corelock structure.
 * All-zero state means unlocked: both myl[] interest flags clear.
 * memset (rather than an initializer) also zeroes any padding bytes.
 *---------------------------------------------------------------------------
 */
void corelock_init(struct corelock *cl)
{
    memset(cl, 0, sizeof (*cl));
}
272 | |||
273 | #if 1 /* Assembly locks to minimize overhead */ | ||
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 * Peterson's algorithm in asm; naked, so the asm returns via bx lr
 * itself.  Core IDs (read from PROCESSOR_ID) are 0x55/0xaa, and
 * "id lsr #7" maps them to myl[] indices 0/1.
 *---------------------------------------------------------------------------
 */
void corelock_lock(struct corelock *cl) __attribute__((naked));
void corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff \n" /* r2 = othercore */
        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
        "1: \n"
        "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp r3, #0 \n" /* yes? lock acquired */
        "bxeq lr \n"
        "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
        "cmp r3, r1 \n"
        "bxeq lr \n" /* yes? lock acquired */
        "b 1b \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
300 | |||
/*---------------------------------------------------------------------------
 * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
 * Non-blocking variant of corelock_lock: sets this core's interest flag,
 * tests the Peterson condition once, and backs out (clears myl[core])
 * on contention.  Naked - the asm computes the return value in r0 and
 * returns via bx lr.
 *---------------------------------------------------------------------------
 */
int corelock_try_lock(struct corelock *cl) __attribute__((naked));
int corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "mov r3, r0 \n"
        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff \n" /* r2 = othercore */
        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
        "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "eors r0, r0, r2 \n" /* yes? lock acquired */
        "bxne lr \n"
        "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
        "ands r0, r0, r1 \n"
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx lr \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0; /* Unreachable - keeps the compiler happy for a naked int fn */
    (void)cl;
}
329 | |||
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 * Simply clears this core's myl[] interest flag; naked, returns via
 * bx lr in the asm.
 *---------------------------------------------------------------------------
 */
void corelock_unlock(struct corelock *cl) __attribute__((naked));
void corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1] \n"
        "mov r2, #0 \n" /* cl->myl[core] = 0 */
        "strb r2, [r0, r1, lsr #7] \n"
        "bx lr \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl;
}
347 | #else /* C versions for reference */ | ||
348 | /*--------------------------------------------------------------------------- | ||
349 | * Wait for the corelock to become free and aquire it when it does. | ||
350 | *--------------------------------------------------------------------------- | ||
351 | */ | ||
352 | void corelock_lock(struct corelock *cl) | ||
353 | { | ||
354 | const unsigned int core = CURRENT_CORE; | ||
355 | const unsigned int othercore = 1 - core; | ||
356 | |||
357 | cl->myl[core] = core; | ||
358 | cl->turn = othercore; | ||
359 | |||
360 | for (;;) | ||
361 | { | ||
362 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
363 | break; | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /*--------------------------------------------------------------------------- | ||
368 | * Try to aquire the corelock. If free, caller gets it, otherwise return 0. | ||
369 | *--------------------------------------------------------------------------- | ||
370 | */ | ||
371 | int corelock_try_lock(struct corelock *cl) | ||
372 | { | ||
373 | const unsigned int core = CURRENT_CORE; | ||
374 | const unsigned int othercore = 1 - core; | ||
375 | |||
376 | cl->myl[core] = core; | ||
377 | cl->turn = othercore; | ||
378 | |||
379 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
380 | { | ||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | cl->myl[core] = 0; | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | /*--------------------------------------------------------------------------- | ||
389 | * Release ownership of the corelock | ||
390 | *--------------------------------------------------------------------------- | ||
391 | */ | ||
392 | void corelock_unlock(struct corelock *cl) | ||
393 | { | ||
394 | cl->myl[CURRENT_CORE] = 0; | ||
395 | } | ||
396 | #endif /* ASM / C selection */ | ||
397 | |||
398 | #endif /* CONFIG_CORELOCK == SW_CORELOCK */ | ||
399 | |||
/*---------------------------------------------------------------------------
 * Put core in a power-saving state if waking list wasn't repopulated and if
 * no other core requested a wakeup for it to perform a task.
 * NOTE(review): appears to expect IRQs disabled on entry (enable_irq() on
 * exit, and core_wake races on the same mailbox) - confirm at call sites.
 *---------------------------------------------------------------------------
 */
#ifdef CPU_PP502x
#if NUM_CORES == 1
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* Multi-core: handshake with core_wake via the PP502x mailbox so a wake
 * request cannot be lost between deciding to sleep and actually sleeping.
 * The asm below is the live code; the #else branch is the C reference. */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov r0, #4 \n" /* r0 = 0x4 << core */
        "mov r0, r0, lsl %[c] \n"
        "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
        "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
        "tst r1, r0, lsl #2 \n"
        "moveq r1, #0x80000000 \n" /* Then sleep */
        "streq r1, [%[ctl], %[c], lsl #2] \n"
        "moveq r1, #0 \n" /* Clear control reg */
        "streq r1, [%[ctl], %[c], lsl #2] \n"
        "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
        "str r1, [%[mbx], #8] \n"
        "1: \n" /* Wait for wake procedure to finish */
        "ldr r1, [%[mbx], #0] \n"
        "tst r1, r0, lsr #2 \n"
        "bne 1b \n"
        :
        : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
        : "r0", "r1");
#else /* C version for reference */
    /* Signal intent to sleep */
    MBX_MSG_SET = 0x4 << core;

    /* Something waking or other processor intends to wake us? */
    if ((MBX_MSG_STAT & (0x10 << core)) == 0)
    {
        sleep_core(core);
        wake_core(core);
    }

    /* Signal wake - clear wake flag */
    MBX_MSG_CLR = 0x14 << core;

    /* Wait for other processor to finish wake procedure */
    while (MBX_MSG_STAT & (0x1 << core));
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
#if NUM_CORES == 1
/* Single-core: just sleep and re-enable interrupts on wakeup */
static inline void core_sleep(void)
{
    sleep_core(CURRENT_CORE);
    enable_irq();
}
#else
/* PP5002 has no mailboxes - emulate using bytes.
 * Same sleep/wake handshake as the PP502x version, but through the
 * core_semaphores byte flags instead of hardware mailbox bits. */
static inline void core_sleep(unsigned int core)
{
#if 1
    asm volatile (
        "mov r0, #1 \n" /* Signal intent to sleep */
        "strb r0, [%[sem], #2] \n"
        "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
        "cmp r0, #0 \n"
        "bne 2f \n"
        /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
         * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
         * that the correct alternative is executed. Don't change the order
         * of the next 4 instructions! */
        "tst pc, #0x0c \n"
        "mov r0, #0xca \n"
        "strne r0, [%[ctl], %[c], lsl #2] \n"
        "streq r0, [%[ctl], %[c], lsl #2] \n"
        "nop \n" /* nop's needed because of pipeline */
        "nop \n"
        "nop \n"
        "2: \n"
        "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
        "strb r0, [%[sem], #1] \n"
        "strb r0, [%[sem], #2] \n"
        "1: \n" /* Wait for wake procedure to finish */
        "ldrb r0, [%[sem], #0] \n"
        "cmp r0, #0 \n"
        "bne 1b \n"
        :
        : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
          [ctl]"r"(&CPU_CTL)
        : "r0"
    );
#else /* C version for reference */
    /* Signal intent to sleep */
    core_semaphores[core].intend_sleep = 1;

    /* Something waking or other processor intends to wake us? */
    if (core_semaphores[core].stay_awake == 0)
    {
        sleep_core(core);
    }

    /* Signal wake - clear wake flag */
    core_semaphores[core].stay_awake = 0;
    core_semaphores[core].intend_sleep = 0;

    /* Wait for other processor to finish wake procedure */
    while (core_semaphores[core].intend_wake != 0);

    /* Enable IRQ */
#endif /* ASM/C selection */
    enable_irq();
}
#endif /* NUM_CORES */
#endif /* PP CPU type */
520 | |||
/*---------------------------------------------------------------------------
 * Wake another processor core that is sleeping or prevent it from doing so
 * if it was already destined. FIQ, IRQ should be disabled before calling.
 *---------------------------------------------------------------------------
 */
#if NUM_CORES == 1
/* Shared single-core build debugging version */
void core_wake(void)
{
    /* No wakey - core already wakey */
}
#elif defined (CPU_PP502x)
/* PP502x: handshake with core_sleep through the hardware mailbox; sets the
 * stay-awake flag, waits until the other core has either committed to sleep
 * or aborted, then clears PROC_SLEEP if it went down.  The asm is the live
 * code; the #else branch is the C reference. */
void core_wake(unsigned int othercore)
{
#if 1
    /* avoid r0 since that contains othercore */
    asm volatile (
        "mrs r3, cpsr \n" /* Disable IRQ */
        "orr r1, r3, #0x80 \n"
        "msr cpsr_c, r1 \n"
        "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
        "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
        "str r2, [%[mbx], #4] \n"
        "1: \n" /* If it intends to sleep, let it first */
        "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
        "eor r1, r1, #0xc \n"
        "tst r1, r2, lsr #2 \n"
        "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
        "tsteq r1, #0x80000000 \n"
        "beq 1b \n" /* Wait for sleep or wake */
        "tst r1, #0x80000000 \n" /* If sleeping, wake it */
        "movne r1, #0x0 \n"
        "strne r1, [%[ctl], %[oc], lsl #2] \n"
        "mov r1, r2, lsr #4 \n"
        "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
        "msr cpsr_c, r3 \n" /* Restore IRQ */
        :
        : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
          [oc]"r"(othercore)
        : "r1", "r2", "r3");
#else /* C version for reference */
    /* Disable interrupts - avoid reentrancy from the tick */
    int oldlevel = disable_irq_save();

    /* Signal intent to wake other processor - set stay awake */
    MBX_MSG_SET = 0x11 << othercore;

    /* If it intends to sleep, wait until it does or aborts */
    while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
           (PROC_CTL(othercore) & PROC_SLEEP) == 0);

    /* If sleeping, wake it up */
    if (PROC_CTL(othercore) & PROC_SLEEP)
        PROC_CTL(othercore) = 0;

    /* Done with wake procedure */
    MBX_MSG_CLR = 0x1 << othercore;
    restore_irq(oldlevel);
#endif /* ASM/C selection */
}
581 | #elif CONFIG_CPU == PP5002 | ||
582 | /* PP5002 has no mailboxes - emulate using bytes */ | ||
583 | void core_wake(unsigned int othercore) | ||
584 | { | ||
585 | #if 1 | ||
586 | /* avoid r0 since that contains othercore */ | ||
587 | asm volatile ( | ||
588 | "mrs r3, cpsr \n" /* Disable IRQ */ | ||
589 | "orr r1, r3, #0x80 \n" | ||
590 | "msr cpsr_c, r1 \n" | ||
591 | "mov r1, #1 \n" /* Signal intent to wake other core */ | ||
592 | "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */ | ||
593 | "strh r1, [%[sem], #0] \n" | ||
594 | "mov r2, #0x8000 \n" | ||
595 | "1: \n" /* If it intends to sleep, let it first */ | ||
596 | "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */ | ||
597 | "cmp r1, #1 \n" | ||
598 | "ldr r1, [%[st]] \n" /* && not sleeping ? */ | ||
599 | "tsteq r1, r2, lsr %[oc] \n" | ||
600 | "beq 1b \n" /* Wait for sleep or wake */ | ||
601 | "tst r1, r2, lsr %[oc] \n" | ||
602 | "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */ | ||
603 | "movne r1, #0xce \n" | ||
604 | "strne r1, [r2, %[oc], lsl #2] \n" | ||
605 | "mov r1, #0 \n" /* Done with wake procedure */ | ||
606 | "strb r1, [%[sem], #0] \n" | ||
607 | "msr cpsr_c, r3 \n" /* Restore IRQ */ | ||
608 | : | ||
609 | : [sem]"r"(&core_semaphores[othercore]), | ||
610 | [st]"r"(&PROC_STAT), | ||
611 | [oc]"r"(othercore) | ||
612 | : "r1", "r2", "r3" | ||
613 | ); | ||
614 | #else /* C version for reference */ | ||
615 | /* Disable interrupts - avoid reentrancy from the tick */ | ||
616 | int oldlevel = disable_irq_save(); | ||
617 | |||
618 | /* Signal intent to wake other processor - set stay awake */ | ||
619 | core_semaphores[othercore].intend_wake = 1; | ||
620 | core_semaphores[othercore].stay_awake = 1; | ||
621 | |||
622 | /* If it intends to sleep, wait until it does or aborts */ | ||
623 | while (core_semaphores[othercore].intend_sleep != 0 && | ||
624 | (PROC_STAT & PROC_SLEEPING(othercore)) == 0); | ||
625 | |||
626 | /* If sleeping, wake it up */ | ||
627 | if (PROC_STAT & PROC_SLEEPING(othercore)) | ||
628 | wake_core(othercore); | ||
629 | |||
630 | /* Done with wake procedure */ | ||
631 | core_semaphores[othercore].intend_wake = 0; | ||
632 | restore_irq(oldlevel); | ||
633 | #endif /* ASM/C selection */ | ||
634 | } | ||
635 | #endif /* CPU type */ | ||
636 | |||
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
 * Switches to a stack that always resides in the Rockbox core.
 *
 * Needed when a thread suicides on a core other than the main CPU since the
 * stack used when idling is the stack of the last thread to run. This stack
 * may not reside in the core firmware in which case the core will continue
 * to use a stack from an unloaded module until another thread runs on it.
 *
 * The old sp is saved into the top word of this core's idle stack, and sp is
 * then pointed at that same top word (so the idle stack grows down from
 * there).
 *---------------------------------------------------------------------------
 */
static inline void switch_to_idle_stack(const unsigned int core)
{
    asm volatile (
        "str  sp, [%0] \n" /* save original stack pointer on idle stack */
        "mov  sp, %0   \n" /* switch stacks */
        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
    (void)core;
}
655 | |||
/*---------------------------------------------------------------------------
 * Perform core switch steps that need to take place inside switch_thread.
 *
 * These steps must take place before changing the processor and after
 * having entered switch_thread since switch_thread may not do a normal return
 * because the stack being used for anything the compiler saved will not belong
 * to the thread's destination core and it may have been recycled for other
 * purposes by the time a normal context load has taken place. switch_thread
 * will also clobber anything stashed in the thread's context or stored in the
 * nonvolatile registers if it is saved there before the call since the
 * compiler's order of operations cannot be known for certain.
 */
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
    /* Flush our data to ram */
    cpucache_flush();
    /* Stash thread in r4 slot (context.r[0]) so the restart code can find it */
    thread->context.r[0] = (uint32_t)thread;
    /* Stash restart address in r5 slot (context.r[1]) */
    thread->context.r[1] = thread->context.start;
    /* Save sp in context.sp while still running on old core; this word holds
     * the original sp stored there by switch_to_idle_stack — TODO confirm
     * against the callers' sequencing */
    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
679 | |||
/*---------------------------------------------------------------------------
 * Machine-specific helper function for switching the processor a thread is
 * running on. Basically, the thread suicides on the departing core and is
 * reborn on the destination. Were it not for gcc's ill-behavior regarding
 * naked functions written in C where it actually clobbers non-volatile
 * registers before the intended prologue code, this would all be much
 * simpler. Generic setup is done in switch_core itself.
 */

/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 *
 * Entered with r0 = core, r1 = thread (ARM calling convention). The restart
 * address for the destination core is patched into thread->context.start
 * (offset #40) before tail-calling switch_thread; the "1:" code after that
 * runs on the destination core with r0 = thread (see load_context).
 */
static void __attribute__((naked))
    switch_thread_core(unsigned int core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
        "ldr    r2, =idle_stacks    \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
        "ldr    r2, [r2, r0, lsl #2] \n"
        "add    r2, r2, %0*4        \n"
        "stmfd  r2!, { sp }         \n" /* save original stack pointer on idle stack */
        "mov    sp, r2              \n" /* switch stacks */
        "adr    r2, 1f              \n" /* r2 = new core restart address */
        "str    r2, [r1, #40]       \n" /* thread->context.start = r2 */
        "ldr    pc, =switch_thread  \n" /* r0 = thread after call - see load_context */
    "1: \n"
        "ldr    sp, [r0, #32]       \n" /* Reload original sp from context structure */
        "mov    r1, #0              \n" /* Clear start address */
        "str    r1, [r0, #40]       \n"
        "ldr    r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
        "mov    lr, pc              \n"
        "bx     r0                  \n"
        "ldmfd  sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
        ".ltorg                     \n" /* Dump constant pool */
        : : "i"(IDLE_STACK_WORDS)
    );
    (void)core; (void)thread;
}
721 | |||
722 | /*--------------------------------------------------------------------------- | ||
723 | * Do any device-specific inits for the threads and synchronize the kernel | ||
724 | * initializations. | ||
725 | *--------------------------------------------------------------------------- | ||
726 | */ | ||
727 | static void core_thread_init(unsigned int core) INIT_ATTR; | ||
728 | static void core_thread_init(unsigned int core) | ||
729 | { | ||
730 | if (core == CPU) | ||
731 | { | ||
732 | /* Wake up coprocessor and let it initialize kernel and threads */ | ||
733 | #ifdef CPU_PP502x | ||
734 | MBX_MSG_CLR = 0x3f; | ||
735 | #endif | ||
736 | wake_core(COP); | ||
737 | /* Sleep until COP has finished */ | ||
738 | sleep_core(CPU); | ||
739 | } | ||
740 | else | ||
741 | { | ||
742 | /* Wake the CPU and return */ | ||
743 | wake_core(CPU); | ||
744 | } | ||
745 | } | ||
746 | #endif /* NUM_CORES */ | ||
747 | |||
#elif defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
  || CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
  || CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
/* Use the generic ARMv4/v5/v6 wait for IRQ */
static inline void core_sleep(void)
{
    /* CP15 c7,c0,4: wait-for-interrupt; core halts until an interrupt is
     * pending, then interrupts are re-enabled so it gets serviced */
    asm volatile (
        "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
#if CONFIG_CPU == IMX31L
        "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
#endif
        : : "r"(0)
    );
    enable_irq();
}
#else
/* Fallback when no target-specific sleep exists: no power saving, just
 * re-enable interrupts so the scheduler can proceed */
static inline void core_sleep(void)
{
#warning core_sleep not implemented, battery life will be decreased
    enable_irq();
}
#endif /* CONFIG_CPU == */
770 | |||
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *
 * Reached via load_context jumping to the start_thread label with a0 = macsr
 * and a1 = context (see load_context / THREAD_STARTUP_INIT). Offset 48 in the
 * context is the saved sp slot and offset 52 is context.start — TODO confirm
 * against struct regs layout.
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* a0=macsr, a1=context */
    asm volatile (
    "start_thread:             \n" /* Start here - no naked attribute */
        "move.l  %a0, %macsr   \n" /* Set initial mac status reg */
        "lea.l   48(%a1), %a1  \n"
        "move.l  (%a1)+, %sp   \n" /* Set initial stack */
        "move.l  (%a1), %a2    \n" /* Fetch thread function pointer */
        "clr.l   (%a1)         \n" /* Mark thread running */
        "jsr     (%a2)         \n" /* Call thread function */
    );
    thread_exit();             /* Thread function returned - clean up */
}
791 | |||
/* Set EMAC unit to fractional mode with saturation for each new thread,
 * since that's what'll be the most useful for most things which the dsp
 * will do. Codecs should still initialize their preferred modes
 * explicitly. Context pointer is placed in d2 slot and start_thread
 * pointer in d3 slot. thread function pointer is placed in context.start.
 * See load_context for what happens when thread is initially going to
 * run.
 *
 * Note: context.d[0]/d[1] map onto the d2/d3 save slots because d0 holds
 * macsr and d1 is not saved (see store_context's register list).
 */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
       (thread)->context.d[0] = (uint32_t)&(thread)->context,     \
       (thread)->context.d[1] = (uint32_t)start_thread,           \
       (thread)->context.start = (uint32_t)(function); })
805 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *
 * macsr is copied through d0 and saved in the first slot, followed by
 * d2-d7 and a2-a7 (13 longwords total).
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
818 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *
 * If context.start (offset 52) is non-NULL the thread has never run: jump
 * to start_thread with a0 = macsr, a1 = context, a2 = start_thread (set up
 * by THREAD_STARTUP_INIT). Otherwise restore the 13 saved registers.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "move.l  52(%0), %%d0                  \n"  /* Get start address */
        "beq.b   1f                            \n"  /* NULL -> already running */
        "movem.l (%0), %%a0-%%a2               \n"  /* a0=macsr, a1=context, a2=start_thread */
        "jmp     (%%a2)                        \n"  /* Start the thread */
    "1: \n"
        "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0, %%macsr                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}
836 | |||
837 | /*--------------------------------------------------------------------------- | ||
838 | * Put core in a power-saving state if waking list wasn't repopulated. | ||
839 | *--------------------------------------------------------------------------- | ||
840 | */ | ||
841 | static inline void core_sleep(void) | ||
842 | { | ||
843 | /* Supervisor mode, interrupts enabled upon wakeup */ | ||
844 | asm volatile ("stop #0x2000"); | ||
845 | }; | ||
846 | |||
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *
 * Reached via load_context jumping to _start_thread with r8 = context (set
 * up by THREAD_STARTUP_INIT). Offset 4 holds the thread function, offset 28
 * the initial sp, offset 36 context.start (cleared in the jsr delay slot to
 * mark the thread running).
 *---------------------------------------------------------------------------
 */
void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) __start_thread(void)
{
    /* r8 = context */
    asm volatile (
    "_start_thread:            \n" /* Start here - no naked attribute */
        "mov.l  @(4, r8), r0   \n" /* Fetch thread function pointer */
        "mov.l  @(28, r8), r15 \n" /* Set initial sp */
        "mov    #0, r1         \n" /* Start the thread */
        "jsr    @r0            \n"
        "mov.l  r1, @(36, r8)  \n" /* Clear start address */
    );
    thread_exit();             /* Thread function returned - clean up */
}
866 | |||
/* Place context pointer in r8 slot, function pointer in r9 slot, and
 * start_thread pointer in context_start. load_context reads context.start
 * to decide whether to jump to start_thread or restore a saved context. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function),         \
       (thread)->context.start = (uint32_t)start_thread; })
873 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *
 * Saves pr and r8-r15 with pre-decrement stores working down from offset 36,
 * so the constraint register ends up back at its original value.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36, %0   \n" /* Start at last reg. By the time routine */
        "sts.l   pr, @-%0  \n" /* is done, %0 will have the original value */
        "mov.l   r15,@-%0  \n"
        "mov.l   r14,@-%0  \n"
        "mov.l   r13,@-%0  \n"
        "mov.l   r12,@-%0  \n"
        "mov.l   r11,@-%0  \n"
        "mov.l   r10,@-%0  \n"
        "mov.l   r9, @-%0  \n"
        "mov.l   r8, @-%0  \n"
        : : "r" (addr)
    );
}
894 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *
 * If context.start (offset 36) is non-NULL the thread has never run: jump
 * there (to _start_thread) with r8 = context loaded in the delay slot.
 * Otherwise restore r8-r15 and pr.
 *
 * NOTE(review): the asm uses the named label ".running"; duplicate-label
 * clashes would occur if this inline were expanded twice in one translation
 * unit — verify it has a single call site per file.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l  @(36, %0), r0 \n" /* Get start address */
        "tst    r0, r0        \n"
        "bt     .running      \n" /* NULL -> already running */
        "jmp    @r0           \n" /* r8 = context */
    ".running: \n"
        "mov.l  @%0+, r8      \n" /* Executes in delay slot and outside it */
        "mov.l  @%0+, r9      \n"
        "mov.l  @%0+, r10     \n"
        "mov.l  @%0+, r11     \n"
        "mov.l  @%0+, r12     \n"
        "mov.l  @%0+, r13     \n"
        "mov.l  @%0+, r14     \n"
        "mov.l  @%0+, r15     \n"
        "lds.l  @%0+, pr      \n"
        : : "r" (addr) : "r0" /* only! */
    );
}
919 | |||
/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *
 * Clears SBY in SBYCR (GBR-relative byte access) so SLEEP enters sleep
 * rather than standby, then enables interrupts immediately before sleeping
 * (the instruction after ldc to sr cannot be interrupted, avoiding a missed
 * wakeup race).
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
    asm volatile (
        "and.b  #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
        "mov    #0, r1            \n" /* Enable interrupts */
        "ldc    r1, sr            \n" /* Following instruction cannot be interrupted */
        "sleep                    \n" /* Execute standby */
        : : "z"(&SBYCR-GBR) : "r1");
}
933 | |||
#elif defined(CPU_MIPS) && CPU_MIPS == 32

/*---------------------------------------------------------------------------
 * Start the thread running and terminate it if it returns
 *
 * Reached via load_context jumping to the start_thread label with
 * t1 ($9) = context. Offset 4 holds the thread function, offset 36 the
 * initial sp, offset 44 context.start (cleared in the jalr delay slot to
 * mark the thread running).
 *---------------------------------------------------------------------------
 */

void start_thread(void); /* Provide C access to ASM label */
static void __attribute__((used)) _start_thread(void)
{
    /* t1 = context */
    asm volatile (
    "start_thread: \n"
        ".set noreorder     \n"
        ".set noat          \n"
        "lw     $8,  4($9)  \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
        "lw     $29, 36($9) \n" /* Set initial sp(=$29) */
        "jalr   $8          \n" /* Start the thread */
        "sw     $0, 44($9)  \n" /* Clear start address */
        ".set at            \n"
        ".set reorder       \n"
    );
    thread_exit();              /* Thread function returned - clean up */
}
958 | |||
/* Place context pointer in s0 slot, function pointer in s1 slot, and
 * start_thread pointer in context_start. load_context reads context.start
 * to decide whether to jump to start_thread or restore a saved context. */
#define THREAD_STARTUP_INIT(core, thread, function) \
    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
       (thread)->context.r[1] = (uint32_t)(function),         \
       (thread)->context.start = (uint32_t)start_thread; })
965 | |||
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *
 * Saves the MIPS callee-saved set: s0-s7, fp, sp and ra at fixed offsets
 * matching what load_context restores.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        ".set noreorder        \n"
        ".set noat             \n"
        "sw     $16,  0(%0)    \n" /* s0 */
        "sw     $17,  4(%0)    \n" /* s1 */
        "sw     $18,  8(%0)    \n" /* s2 */
        "sw     $19, 12(%0)    \n" /* s3 */
        "sw     $20, 16(%0)    \n" /* s4 */
        "sw     $21, 20(%0)    \n" /* s5 */
        "sw     $22, 24(%0)    \n" /* s6 */
        "sw     $23, 28(%0)    \n" /* s7 */
        "sw     $30, 32(%0)    \n" /* fp */
        "sw     $29, 36(%0)    \n" /* sp */
        "sw     $31, 40(%0)    \n" /* ra */
        ".set at               \n"
        ".set reorder          \n"
        : : "r" (addr)
    );
}
991 | |||
/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *
 * If context.start (offset 44) is non-NULL the thread has never run: jump
 * there (to start_thread) with t1 = context set in the delay slot.
 * Otherwise restore s0-s7, fp, sp and ra.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        ".set noat             \n"
        ".set noreorder        \n"
        "lw     $8, 44(%0)     \n" /* Get start address ($8 = t0) */
        "beqz   $8, running    \n" /* NULL -> already running */
        "nop                   \n"
        "jr     $8             \n"
        "move   $9, %0         \n" /* t1 = context */
    "running: \n"
        "lw     $16,  0(%0)    \n" /* s0 */
        "lw     $17,  4(%0)    \n" /* s1 */
        "lw     $18,  8(%0)    \n" /* s2 */
        "lw     $19, 12(%0)    \n" /* s3 */
        "lw     $20, 16(%0)    \n" /* s4 */
        "lw     $21, 20(%0)    \n" /* s5 */
        "lw     $22, 24(%0)    \n" /* s6 */
        "lw     $23, 28(%0)    \n" /* s7 */
        "lw     $30, 32(%0)    \n" /* fp */
        "lw     $29, 36(%0)    \n" /* sp */
        "lw     $31, 40(%0)    \n" /* ra */
        ".set at               \n"
        ".set reorder          \n"
        : : "r" (addr) : "t0", "t1"
    );
}
1023 | |||
/*---------------------------------------------------------------------------
 * Put core in a power-saving state.
 *
 * Sets the reduced-power bit (0x8000000) in CP0 Status ($12), executes WAIT,
 * then restores the previous Status value and re-enables interrupts.
 *---------------------------------------------------------------------------
 */
static inline void core_sleep(void)
{
#if CONFIG_CPU == JZ4732
    /* Ingenic-specific idle via the clock/power manager */
    __cpm_idle_mode();
#endif
    asm volatile (".set mips32r2           \n"
                  "mfc0  $8, $12           \n" /* mfc t0, $12 */
                  "move  $9, $8            \n" /* move t1, t0 */
                  "la    $10, 0x8000000    \n" /* la t2, 0x8000000 */
                  "or    $8, $8, $10       \n" /* Enable reduced power mode */
                  "mtc0  $8, $12           \n" /* mtc t0, $12 */
                  "wait                    \n"
                  "mtc0  $9, $12           \n" /* mtc t1, $12 */
                  ".set mips0              \n"
                  ::: "t0", "t1", "t2"
                  );
    enable_irq();
}


#endif /* CONFIG_CPU == */
1049 | 184 | ||
1050 | /* | 185 | /* |
1051 | * End Processor-specific section | 186 | * End Processor-specific section |