From 555ad6710fd897bfc12549197b606c90b06000b4 Mon Sep 17 00:00:00 2001
From: Michael Sevakis
Date: Wed, 2 Jun 2010 12:45:36 +0000
Subject: Threading: Split processor support code into respective target files.

C files from /target/xxx are #included into thread.c rather than compiled
separately because the inlining they provide is essential; the files are
code, not declarations. Copyrights in each new file go to whoever
implemented the first functional support.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26479 a1c6a512-1295-4272-9138-f99709370657
---
 firmware/target/arm/thread-arm.c           | 112 ++++++
 firmware/target/arm/thread-pp.c            | 540 +++++++++++++++++++++++++++++
 firmware/target/coldfire/thread-coldfire.c |  97 ++++++
 firmware/target/mips/thread-mips32.c       | 133 +++++++
 firmware/target/sh/thread-sh.c             | 109 ++++++
 5 files changed, 991 insertions(+)
 create mode 100644 firmware/target/arm/thread-arm.c
 create mode 100644 firmware/target/arm/thread-pp.c
 create mode 100644 firmware/target/coldfire/thread-coldfire.c
 create mode 100644 firmware/target/mips/thread-mips32.c
 create mode 100644 firmware/target/sh/thread-sh.c

diff --git a/firmware/target/arm/thread-arm.c b/firmware/target/arm/thread-arm.c
new file mode 100644
index 0000000000..c2d91cec25
--- /dev/null
+++ b/firmware/target/arm/thread-arm.c
@@ -0,0 +1,112 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2005 by Thom Johansen
+ *
+ * Generic ARM threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+static void __attribute__((naked,used)) start_thread(void)
+{
+    /* r0 = context */
+    asm volatile (
+        "ldr sp, [r0, #32] \n" /* Load initial sp */
+        "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
+        "mov r1, #0 \n" /* Mark thread as running */
+        "str r1, [r0, #40] \n"
+#if NUM_CORES > 1
+        "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
+        "mov lr, pc \n" /* This could be the first entry into */
+        "bx r0 \n" /* plugin or codec code for this core. */
+#endif
+        "mov lr, pc \n" /* Call thread function */
+        "bx r4 \n"
+    ); /* No clobber list - new thread doesn't care */
+    thread_exit();
+#if 0
+    asm volatile (".ltorg"); /* Dump constant pool */
+#endif
+}
+
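As the commit message notes, these files are compiled by being #included from thread.c rather than built as separate translation units, so store_context()/load_context() can be inlined straight into switch_thread(). A minimal sketch of such an include chain (the guard macros and ordering here are illustrative assumptions; the real selection logic in thread.c is not part of this patch):

    /* thread.c (sketch only - guards are assumptions, not the actual code) */
    #if defined(CPU_COLDFIRE)
    #include "thread-coldfire.c"
    #elif defined(CPU_ARM)
    #include "thread-arm.c"
    #ifdef CPU_PP
    #include "thread-pp.c"   /* PortalPlayer additions on top of generic ARM */
    #endif
    #elif defined(CPU_MIPS)
    #include "thread-mips32.c"
    #elif CONFIG_CPU == SH7034
    #include "thread-sh.c"
    #endif
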
+/* For startup, place context pointer in r4 slot, start_thread pointer in r5
+ * slot, and thread function pointer in context.start. See load_context for
+ * what happens when thread is initially going to run.
+ */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+       (thread)->context.r[1] = (uint32_t)start_thread, \
+       (thread)->context.start = (uint32_t)function; })
+
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+    asm volatile(
+        "stmia %0, { r4-r11, sp, lr } \n"
+        : : "r" (addr)
+    );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+    asm volatile(
+        "ldr r0, [%0, #40] \n" /* Load start pointer */
+        "cmp r0, #0 \n" /* Check for NULL */
+        "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
+        "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
+        : : "r" (addr) : "r0" /* only! */
+    );
+}
+
+#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
+|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
+|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
+/* Use the generic ARMv4/v5/v6 wait for IRQ */
+static inline void core_sleep(void)
+{
+    asm volatile (
+        "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
+#if CONFIG_CPU == IMX31L
+        "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
+#endif
+        : : "r"(0)
+    );
+    enable_irq();
+}
+#else
+/* Skip this if special code is required and implemented */
+#ifndef CPU_PP
+static inline void core_sleep(void)
+{
+    #warning core_sleep not implemented, battery life will be decreased
+    enable_irq();
+}
+#endif /* CPU_PP */
+#endif
+
+
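The hard-coded offsets in the ARM assembly above (#32 for sp, #40 for start) match the 10-word store done by "stmia %0, { r4-r11, sp, lr }". The layout that implies is sketched below; the struct and field names are illustrative, inferred from the code rather than copied from thread.h:

    #include <stdint.h>  /* for uint32_t */

    /* Inferred context layout (sketch, names illustrative) */
    struct regs_sketch
    {
        uint32_t r[8];   /*  0-28: r4-r11; r[0]/r[1] double as startup slots */
        uint32_t sp;     /* 32: stack pointer */
        uint32_t lr;     /* 36: link register */
        uint32_t start;  /* 40: thread start address, zeroed once running */
    };
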
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
new file mode 100644
index 0000000000..20105ccb59
--- /dev/null
+++ b/firmware/target/arm/thread-pp.c
@@ -0,0 +1,540 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2007 by Daniel Ankers
+ *
+ * PP5002 and PP502x SoC threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
+/* Support a special workaround object for large-sector disks */
+#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
+#endif
+
+#if NUM_CORES > 1
+extern uintptr_t cpu_idlestackbegin[];
+extern uintptr_t cpu_idlestackend[];
+extern uintptr_t cop_idlestackbegin[];
+extern uintptr_t cop_idlestackend[];
+static uintptr_t * const idle_stacks[NUM_CORES] =
+{
+    [CPU] = cpu_idlestackbegin,
+    [COP] = cop_idlestackbegin
+};
+
+#if CONFIG_CPU == PP5002
+/* Bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+    volatile uint8_t intend_wake;  /* 00h */
+    volatile uint8_t stay_awake;   /* 01h */
+    volatile uint8_t intend_sleep; /* 02h */
+    volatile uint8_t unused;       /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+#endif /* CONFIG_CPU == PP5002 */
+
+#endif /* NUM_CORES */
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Software core locks using Peterson's mutual exclusion algorithm */
+
+/*---------------------------------------------------------------------------
+ * Initialize the corelock structure.
+ *---------------------------------------------------------------------------
+ */
+void corelock_init(struct corelock *cl)
+{
+    memset(cl, 0, sizeof (*cl));
+}
+
+#if 1 /* Assembly locks to minimize overhead */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl) __attribute__((naked));
+void corelock_lock(struct corelock *cl)
+{
+    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
+    asm volatile (
+        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+        "ldrb r1, [r1] \n"
+        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+        "eor r2, r1, #0xff \n" /* r2 = othercore */
+        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+    "1: \n"
+        "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+        "cmp r3, #0 \n" /* yes? lock acquired */
+        "bxeq lr \n"
+        "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
+        "cmp r3, r1 \n"
+        "bxeq lr \n" /* yes? lock acquired */
+        "b 1b \n" /* keep trying */
+        : : "i"(&PROCESSOR_ID)
+    );
+    (void)cl;
+}
+
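The "lsr #7" addressing in this lock relies on the documented processor IDs being 0x55 and 0xaa: shifting right by seven maps them to array indices 0 and 1. A tiny sketch of that mapping, plus the usual lock/unlock bracket around cross-core state (the counter and its corelock here are hypothetical, for illustration only):

    /* 0x55 >> 7 == 0 (CPU), 0xaa >> 7 == 1 (COP) - what [r0, r1, lsr #7]
     * computes when indexing cl->myl[] */
    static struct corelock counter_cl;  /* corelock_init(&counter_cl) at boot */
    static volatile int shared_counter;

    static void counter_increment(void)
    {
        corelock_lock(&counter_cl);   /* spins until the other core releases */
        shared_counter++;             /* guarded cross-core access */
        corelock_unlock(&counter_cl);
    }
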
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl) __attribute__((naked));
+int corelock_try_lock(struct corelock *cl)
+{
+    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
+    asm volatile (
+        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+        "ldrb r1, [r1] \n"
+        "mov r3, r0 \n"
+        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+        "eor r2, r1, #0xff \n" /* r2 = othercore */
+        "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+        "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+        "eors r0, r0, r2 \n" /* yes? lock acquired */
+        "bxne lr \n"
+        "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
+        "ands r0, r0, r1 \n"
+        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+        "bx lr \n" /* return result */
+        : : "i"(&PROCESSOR_ID)
+    );
+
+    return 0;
+    (void)cl;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl) __attribute__((naked));
+void corelock_unlock(struct corelock *cl)
+{
+    asm volatile (
+        "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+        "ldrb r1, [r1] \n"
+        "mov r2, #0 \n" /* cl->myl[core] = 0 */
+        "strb r2, [r0, r1, lsr #7] \n"
+        "bx lr \n"
+        : : "i"(&PROCESSOR_ID)
+    );
+    (void)cl;
+}
+#else /* C versions for reference */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl)
+{
+    const unsigned int core = CURRENT_CORE;
+    const unsigned int othercore = 1 - core;
+
+    cl->myl[core] = core;
+    cl->turn = othercore;
+
+    for (;;)
+    {
+        if (cl->myl[othercore] == 0 || cl->turn == core)
+            break;
+    }
+}
+
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl)
+{
+    const unsigned int core = CURRENT_CORE;
+    const unsigned int othercore = 1 - core;
+
+    cl->myl[core] = core;
+    cl->turn = othercore;
+
+    if (cl->myl[othercore] == 0 || cl->turn == core)
+    {
+        return 1;
+    }
+
+    cl->myl[core] = 0;
+    return 0;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl)
+{
+    cl->myl[CURRENT_CORE] = 0;
+}
+#endif /* ASM / C selection */
+
+#endif /* CONFIG_CORELOCK == SW_CORELOCK */
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
+#ifdef CPU_PP502x
+#if NUM_CORES == 1
+static inline void core_sleep(void)
+{
+    sleep_core(CURRENT_CORE);
+    enable_irq();
+}
+#else
+static inline void core_sleep(unsigned int core)
+{
+#if 1
+    asm volatile (
+        "mov r0, #4 \n" /* r0 = 0x4 << core */
+        "mov r0, r0, lsl %[c] \n"
+        "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
+        "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) */
+        "tst r1, r0, lsl #2 \n"
+        "moveq r1, #0x80000000 \n" /* Then sleep */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "moveq r1, #0 \n" /* Clear control reg */
+        "streq r1, [%[ctl], %[c], lsl #2] \n"
+        "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
+        "str r1, [%[mbx], #8] \n"
+    "1: \n" /* Wait for wake procedure to finish */
+        "ldr r1, [%[mbx], #0] \n"
+        "tst r1, r0, lsr #2 \n"
+        "bne 1b \n"
+        : : [ctl]"r"(&CPU_CTL), [mbx]"r"(&MBX_MSG_STAT), [c]"r"(core)
+        : "r0", "r1"
+    );
+#endif
+    enable_irq();
+}
+#endif /* NUM_CORES */
+#endif /* CPU_PP502x */
+
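In C-like form, the mailbox handshake the PP502x sleep assembly performs is roughly the following. This is a rough sketch only: MBX_MSG_SET is an assumed "set" counterpart to the MBX_MSG_CLR register used in core_thread_init() further below, and the flag protocol (0x04<<core = intend-to-sleep, 0x10<<core = intend-to-wake, read off the asm comments) is simplified:

    /* sketch - register names and exact flag handling are assumptions,
     * not code from the original file */
    static inline void core_sleep_sketch(unsigned int core)
    {
        MBX_MSG_SET = 0x04 << core;           /* announce intent to sleep */
        if (!(MBX_MSG_STAT & (0x10 << core))) /* unless a wakeup is pending... */
            sleep_core(core);                 /* ...actually stop this core */
        MBX_MSG_CLR = 0x04 << core;           /* withdraw the sleep intent */
        enable_irq();
    }
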
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU, since the
+ * stack used when idling is the stack of the last thread to run. That stack
+ * may not reside in the core firmware, in which case the core would continue
+ * to use a stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
+{
+    asm volatile (
+        "str sp, [%0] \n" /* save original stack pointer on idle stack */
+        "mov sp, %0 \n" /* switch stacks */
+        : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
+    (void)core;
+}
+
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place before changing the processor and after having
+ * entered switch_thread, since switch_thread may not do a normal return: the
+ * stack used for anything the compiler saved will not belong to the thread's
+ * destination core, and it may have been recycled for other purposes by the
+ * time a normal context load has taken place. switch_thread will also clobber
+ * anything stashed in the thread's context or stored in the non-volatile
+ * registers if it is saved there before the call, since the compiler's order
+ * of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+    /* Flush our data to ram */
+    cpucache_flush();
+    /* Stash thread in r4 slot */
+    thread->context.r[0] = (uint32_t)thread;
+    /* Stash restart address in r5 slot */
+    thread->context.r[1] = thread->context.start;
+    /* Save sp in context.sp while still running on old core */
+    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
+}
+
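The slot stashing above only makes sense together with load_context() in thread-arm.c: when context.start is non-zero, "ldmneia %0, { r0, pc }" loads r[0] into r0 and jumps to r[1]. A C model of that dispatch, using the illustrative regs_sketch layout from the earlier note (load_context itself remains the asm shown before):

    /* sketch only - models the asm, not a replacement for it */
    static inline void load_context_model(struct regs_sketch *ctx)
    {
        if (ctx->start != 0)
        {
            /* Not running yet: jump to ctx->r[1] with r0 = ctx->r[0].
             * After core_switch_blk_op() this resumes at the "1:" label
             * in switch_thread_core() below, r0 = the thread pointer. */
            ((void (*)(uint32_t))(uintptr_t)ctx->r[1])(ctx->r[0]);
        }
        /* else: restore r4-r11, sp, lr and return into the thread */
    }
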
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill behavior regarding
+ * naked functions written in C, where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */
+
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void __attribute__((naked))
+    switch_thread_core(unsigned int core, struct thread_entry *thread)
+{
+    /* Pure asm for this because compiler behavior isn't sufficiently
+     * predictable. Stack access also isn't permitted until restoring the
+     * original stack and context. */
+    asm volatile (
+        "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
+        "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+        "ldr r2, [r2, r0, lsl #2] \n"
+        "add r2, r2, %0*4 \n"
+        "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
+        "mov sp, r2 \n" /* switch stacks */
+        "adr r2, 1f \n" /* r2 = new core restart address */
+        "str r2, [r1, #40] \n" /* thread->context.start = r2 */
+        "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
+    "1: \n"
+        "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
+        "mov r1, #0 \n" /* Clear start address */
+        "str r1, [r0, #40] \n"
+        "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
+        "mov lr, pc \n"
+        "bx r0 \n"
+        "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
+        ".ltorg \n" /* Dump constant pool */
+        : : "i"(IDLE_STACK_WORDS)
+    );
+    (void)core; (void)thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
+ *---------------------------------------------------------------------------
+ */
+static void core_thread_init(unsigned int core) INIT_ATTR;
+static void core_thread_init(unsigned int core)
+{
+    if (core == CPU)
+    {
+        /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+        MBX_MSG_CLR = 0x3f;
+#endif
+        wake_core(COP);
+        /* Sleep until COP has finished */
+        sleep_core(CPU);
+    }
+    else
+    {
+        /* Wake the CPU and return */
+        wake_core(CPU);
+    }
+}
+#endif /* NUM_CORES */
+
+ * + ****************************************************************************/ + +/*--------------------------------------------------------------------------- + * Start the thread running and terminate it if it returns + *--------------------------------------------------------------------------- + */ +void start_thread(void); /* Provide C access to ASM label */ +static void __attribute__((used)) __start_thread(void) +{ + /* a0=macsr, a1=context */ + asm volatile ( + "start_thread: \n" /* Start here - no naked attribute */ + "move.l %a0, %macsr \n" /* Set initial mac status reg */ + "lea.l 48(%a1), %a1 \n" + "move.l (%a1)+, %sp \n" /* Set initial stack */ + "move.l (%a1), %a2 \n" /* Fetch thread function pointer */ + "clr.l (%a1) \n" /* Mark thread running */ + "jsr (%a2) \n" /* Call thread function */ + ); + thread_exit(); +} + +/* Set EMAC unit to fractional mode with saturation for each new thread, + * since that's what'll be the most useful for most things which the dsp + * will do. Codecs should still initialize their preferred modes + * explicitly. Context pointer is placed in d2 slot and start_thread + * pointer in d3 slot. thread function pointer is placed in context.start. + * See load_context for what happens when thread is initially going to + * run. + */ +#define THREAD_STARTUP_INIT(core, thread, function) \ + ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \ + (thread)->context.d[0] = (uint32_t)&(thread)->context, \ + (thread)->context.d[1] = (uint32_t)start_thread, \ + (thread)->context.start = (uint32_t)(function); }) + +/*--------------------------------------------------------------------------- + * Store non-volatile context. + *--------------------------------------------------------------------------- + */ +static inline void store_context(void* addr) +{ + asm volatile ( + "move.l %%macsr,%%d0 \n" + "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n" + : : "a" (addr) : "d0" /* only! */ + ); +} + +/*--------------------------------------------------------------------------- + * Load non-volatile context. + *--------------------------------------------------------------------------- + */ +static inline void load_context(const void* addr) +{ + asm volatile ( + "move.l 52(%0), %%d0 \n" /* Get start address */ + "beq.b 1f \n" /* NULL -> already running */ + "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */ + "jmp (%%a2) \n" /* Start the thread */ + "1: \n" + "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */ + "move.l %%d0, %%macsr \n" + : : "a" (addr) : "d0" /* only! */ + ); +} + +/*--------------------------------------------------------------------------- + * Put core in a power-saving state if waking list wasn't repopulated. + *--------------------------------------------------------------------------- + */ +static inline void core_sleep(void) +{ + /* Supervisor mode, interrupts enabled upon wakeup */ + asm volatile ("stop #0x2000"); +}; diff --git a/firmware/target/mips/thread-mips32.c b/firmware/target/mips/thread-mips32.c new file mode 100644 index 0000000000..e2fccb8022 --- /dev/null +++ b/firmware/target/mips/thread-mips32.c @@ -0,0 +1,133 @@ +/*************************************************************************** + * __________ __ ___. 
diff --git a/firmware/target/mips/thread-mips32.c b/firmware/target/mips/thread-mips32.c
new file mode 100644
index 0000000000..e2fccb8022
--- /dev/null
+++ b/firmware/target/mips/thread-mips32.c
@@ -0,0 +1,133 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Maurus Cuelenaere
+ *
+ * 32-bit MIPS threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+
+void start_thread(void); /* Provide C access to ASM label */
+static void __attribute__((used)) _start_thread(void)
+{
+    /* t1 = context */
+    asm volatile (
+    "start_thread: \n"
+        ".set noreorder \n"
+        ".set noat \n"
+        "lw $8, 4($9) \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
+        "lw $29, 36($9) \n" /* Set initial sp(=$29) */
+        "jalr $8 \n" /* Start the thread */
+        "sw $0, 44($9) \n" /* Clear start address */
+        ".set at \n"
+        ".set reorder \n"
+    );
+    thread_exit();
+}
+
+/* Place context pointer in s0 slot, function pointer in s1 slot, and
+ * start_thread pointer in context.start */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+       (thread)->context.r[1] = (uint32_t)(function), \
+       (thread)->context.start = (uint32_t)start_thread; })
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+    asm volatile (
+        ".set noreorder \n"
+        ".set noat \n"
+        "sw $16, 0(%0) \n" /* s0 */
+        "sw $17, 4(%0) \n" /* s1 */
+        "sw $18, 8(%0) \n" /* s2 */
+        "sw $19, 12(%0) \n" /* s3 */
+        "sw $20, 16(%0) \n" /* s4 */
+        "sw $21, 20(%0) \n" /* s5 */
+        "sw $22, 24(%0) \n" /* s6 */
+        "sw $23, 28(%0) \n" /* s7 */
+        "sw $30, 32(%0) \n" /* fp */
+        "sw $29, 36(%0) \n" /* sp */
+        "sw $31, 40(%0) \n" /* ra */
+        ".set at \n"
+        ".set reorder \n"
+        : : "r" (addr)
+    );
+}
+
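The MIPS stores above (s0-s7, fp, sp, ra at offsets 0-40, with the start address at 44 per start_thread and load_context) imply this layout (a sketch; names are illustrative):

    /* Inferred MIPS context layout (sketch) */
    struct regs_mips_sketch
    {
        uint32_t r[9];   /*  0-32: s0-s7, fp; r[0]/r[1] double as startup slots */
        uint32_t sp;     /* 36: stack pointer */
        uint32_t ra;     /* 40: return address */
        uint32_t start;  /* 44: thread start address, cleared once running */
    };
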
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+    asm volatile (
+        ".set noat \n"
+        ".set noreorder \n"
+        "lw $8, 44(%0) \n" /* Get start address ($8 = t0) */
+        "beqz $8, running \n" /* NULL -> already running */
+        "nop \n"
+        "jr $8 \n"
+        "move $9, %0 \n" /* t1 = context */
+    "running: \n"
+        "lw $16, 0(%0) \n" /* s0 */
+        "lw $17, 4(%0) \n" /* s1 */
+        "lw $18, 8(%0) \n" /* s2 */
+        "lw $19, 12(%0) \n" /* s3 */
+        "lw $20, 16(%0) \n" /* s4 */
+        "lw $21, 20(%0) \n" /* s5 */
+        "lw $22, 24(%0) \n" /* s6 */
+        "lw $23, 28(%0) \n" /* s7 */
+        "lw $30, 32(%0) \n" /* fp */
+        "lw $29, 36(%0) \n" /* sp */
+        "lw $31, 40(%0) \n" /* ra */
+        ".set at \n"
+        ".set reorder \n"
+        : : "r" (addr) : "t0", "t1"
+    );
+}
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(void)
+{
+#if CONFIG_CPU == JZ4732
+    __cpm_idle_mode();
+#endif
+    asm volatile (".set mips32r2 \n"
+                  "mfc0 $8, $12 \n" /* mfc t0, $12 */
+                  "move $9, $8 \n" /* move t1, t0 */
+                  "la $10, 0x8000000 \n" /* la t2, 0x8000000 */
+                  "or $8, $8, $10 \n" /* Enable reduced power mode */
+                  "mtc0 $8, $12 \n" /* mtc t0, $12 */
+                  "wait \n"
+                  "mtc0 $9, $12 \n" /* mtc t1, $12 */
+                  ".set mips0 \n"
+                  ::: "t0", "t1", "t2"
+                  );
+    enable_irq();
+}
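In C terms, the MIPS core_sleep() above sets bit 27 (RP, reduced power mode, 0x8000000) in CP0 Status (register $12), executes wait, then restores the previous Status value. A sketch using hypothetical accessor names (read_c0_status/write_c0_status are illustrative, not functions from this codebase):

    /* sketch - models the asm sequence above */
    static inline void core_sleep_model(void)
    {
        uint32_t status = read_c0_status();   /* CP0 Status, register $12 */
        write_c0_status(status | (1 << 27));  /* set RP (reduced power) bit */
        asm volatile ("wait");                /* stall until an interrupt */
        write_c0_status(status);              /* restore the previous mode */
        enable_irq();
    }
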
diff --git a/firmware/target/sh/thread-sh.c b/firmware/target/sh/thread-sh.c
new file mode 100644
index 0000000000..25e0aadf96
--- /dev/null
+++ b/firmware/target/sh/thread-sh.c
@@ -0,0 +1,109 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * SH processor threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+void start_thread(void); /* Provide C access to ASM label */
+static void __attribute__((used)) __start_thread(void)
+{
+    /* r8 = context */
+    asm volatile (
+    "_start_thread: \n" /* Start here - no naked attribute */
+        "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
+        "mov.l @(28, r8), r15 \n" /* Set initial sp */
+        "mov #0, r1 \n" /* Start the thread */
+        "jsr @r0 \n"
+        "mov.l r1, @(36, r8) \n" /* Clear start address */
+    );
+    thread_exit();
+}
+
+/* Place context pointer in r8 slot, function pointer in r9 slot, and
+ * start_thread pointer in context.start */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+    ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+       (thread)->context.r[1] = (uint32_t)(function), \
+       (thread)->context.start = (uint32_t)start_thread; })
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+    asm volatile (
+        "add #36, %0 \n" /* Start at last reg. By the time routine */
+        "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
+        "mov.l r15,@-%0 \n"
+        "mov.l r14,@-%0 \n"
+        "mov.l r13,@-%0 \n"
+        "mov.l r12,@-%0 \n"
+        "mov.l r11,@-%0 \n"
+        "mov.l r10,@-%0 \n"
+        "mov.l r9, @-%0 \n"
+        "mov.l r8, @-%0 \n"
+        : : "r" (addr)
+    );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+    asm volatile (
+        "mov.l @(36, %0), r0 \n" /* Get start address */
+        "tst r0, r0 \n"
+        "bt .running \n" /* NULL -> already running */
+        "jmp @r0 \n" /* r8 = context */
+    ".running: \n"
+        "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
+        "mov.l @%0+, r9 \n"
+        "mov.l @%0+, r10 \n"
+        "mov.l @%0+, r11 \n"
+        "mov.l @%0+, r12 \n"
+        "mov.l @%0+, r13 \n"
+        "mov.l @%0+, r14 \n"
+        "mov.l @%0+, r15 \n"
+        "lds.l @%0+, pr \n"
+        : : "r" (addr) : "r0" /* only! */
+    );
+}
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(void)
+{
+    asm volatile (
+        "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
+        "mov #0, r1 \n" /* Enable interrupts */
+        "ldc r1, sr \n" /* Following instruction cannot be interrupted */
+        "sleep \n" /* Execute standby */
+        : : "z"(&SBYCR-GBR) : "r1");
+}
+
--
cgit v1.2.3