commit    6ed00870abd566d7267d2436c2693f5a281cda2f
author    Michael Sevakis <jethead71@rockbox.org>  2014-08-08 06:33:51 -0400
committer Michael Sevakis <jethead71@rockbox.org>  2014-08-16 05:15:37 -0400
tree      6011c73e302254fc73f61a1b8b1f295ded1f5d56  /firmware/target/arm/pp/thread-pp.c
parent    eb63d8b4a2a7cbe4e98216b48a75391718fcebd7
Base scheduler queues off linked lists and do cleanup/consolidation
Abstracts threading from itself a bit, changes the way its queues are handled, and does type hiding for that as well. A lot happens here because major brain surgery was already required.

Threads may now be on a run queue and a wait queue simultaneously, so an expired timer only has to wake the thread rather than also remove it from the wait queue, which simplifies the implicit wake handling. List formats change for wait queues: doubly-linked, not circular. The timeout queue is now singly-linked. The run queue is still circular, as before.

Adds a better thread slot allocator that may keep a slot marked as used regardless of the thread state. This assists in dumping the special tasks that switch_thread was tasked to perform (blocking tasks).

Deletes a lot of code, yet surprisingly the result is larger than expected. Well, I don't mind that for the time being: omelettes, breaking a few eggs and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
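For illustration, the three list shapes the message describes could look roughly like this. This is a minimal sketch with hypothetical struct and field names, not the actual Rockbox definitions:

/* Wait queues: doubly-linked and NULL-terminated (no longer circular),
 * so an expired thread can stay queued and be unlinked lazily. */
struct wait_link { struct thread_entry *prev, *next; };

/* Timeout queue: singly-linked; expiry only ever walks it forward. */
struct tmo_link { struct thread_entry *next; };

/* Run queue: still circular, as before. */
struct run_link { struct thread_entry *prev, *next; };

struct thread_entry
{
    struct run_link  rq;   /* may be linked here...            */
    struct wait_link wq;   /* ...and here at the same time now */
    struct tmo_link  tmo;
    /* ... remaining thread state ... */
};

With both link sets present at once, waking an expired thread only means putting it on the run queue; the wait queue entry can be cleaned up later by whoever owns that queue.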
Diffstat (limited to 'firmware/target/arm/pp/thread-pp.c')
-rw-r--r--  firmware/target/arm/pp/thread-pp.c | 95
1 file changed, 36 insertions(+), 59 deletions(-)
diff --git a/firmware/target/arm/pp/thread-pp.c b/firmware/target/arm/pp/thread-pp.c
index 184d243e8d..0af8caa43a 100644
--- a/firmware/target/arm/pp/thread-pp.c
+++ b/firmware/target/arm/pp/thread-pp.c
@@ -82,46 +82,22 @@ static void INIT_ATTR core_thread_init(unsigned int core)
  * to use a stack from an unloaded module until another thread runs on it.
  *---------------------------------------------------------------------------
  */
-static inline void NORETURN_ATTR __attribute__((always_inline))
-    thread_final_exit(struct thread_entry *current)
+static void __attribute__((naked, noinline, noreturn))
+    thread_exit_finalize(unsigned int core, struct thread_entry *current)
 {
     asm volatile (
-        "cmp    %1, #0               \n" /* CPU? */
+        "ldr    r2, =idle_stacks     \n" /* switch to idle stack */
+        "ldr    sp, [r2, r0, lsl #2] \n"
+        "add    sp, sp, %0*4         \n"
+        "cmp    r0, #0               \n" /* CPU? */
+        "mov    r4, r1               \n"
         "blne   commit_dcache        \n"
-        "mov    r0, %0               \n" /* copy thread parameter */
-        "mov    sp, %2               \n" /* switch to idle stack */
-        "bl     thread_final_exit_do \n" /* finish removal */
-        : : "r"(current),
-            "r"(current->core),
-            "r"(&idle_stacks[current->core][IDLE_STACK_WORDS])
-        : "r0", "r1", "r2", "r3", "ip", "lr"); /* Because of flush call,
-                                                  force inputs out
-                                                  of scratch regs */
-    while (1);
-}
+        "mov    r0, r4               \n"
+        "b      thread_exit_final    \n"
+        : : "i"(IDLE_STACK_WORDS));
 
-/*---------------------------------------------------------------------------
- * Perform core switch steps that need to take place inside switch_thread.
- *
- * These steps must take place while before changing the processor and after
- * having entered switch_thread since switch_thread may not do a normal return
- * because the stack being used for anything the compiler saved will not belong
- * to the thread's destination core and it may have been recycled for other
- * purposes by the time a normal context load has taken place. switch_thread
- * will also clobber anything stashed in the thread's context or stored in the
- * nonvolatile registers if it is saved there before the call since the
- * compiler's order of operations cannot be known for certain.
- */
-static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
-{
-    /* Flush our data to ram */
-    commit_dcache();
-    /* Stash thread in r4 slot */
-    thread->context.r[0] = (uint32_t)thread;
-    /* Stash restart address in r5 slot */
-    thread->context.r[1] = thread->context.start;
-    /* Save sp in context.sp while still running on old core */
-    thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
-}
+    while (1);
+    (void)core; (void)current;
+}
 
 /*---------------------------------------------------------------------------
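Read as C, the new exit path in the hunk above behaves roughly like the sketch below. The hypothetical helper set_sp() stands in for the sp-switching instructions, and CPU is assumed to be core 0; the real code must stay in pure asm because a naked function cannot rely on its own stack while that stack is being abandoned:

/* Sketch only -- not compilable as C: swapping sp out from under
 * compiled code is exactly what the asm exists to do safely. */
static void thread_exit_finalize_sketch(unsigned int core,
                                        struct thread_entry *current)
{
    /* Move off the dying thread's stack onto this core's idle stack. */
    set_sp(&idle_stacks[core][IDLE_STACK_WORDS]);  /* hypothetical helper */

    if (core != CPU)          /* "cmp r0, #0 / blne": only the COP flushes */
        commit_dcache();

    thread_exit_final(current);  /* finish removal; never returns */
    while (1);
}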
@@ -136,31 +112,32 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
 /*---------------------------------------------------------------------------
  * This actually performs the core switch.
  */
-static void __attribute__((naked))
-    switch_thread_core(unsigned int core, struct thread_entry *thread)
+static void __attribute__((naked, noinline))
+    switch_thread_core(unsigned int old_core, struct thread_entry *thread)
 {
-    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
-     * Stack access also isn't permitted until restoring the original stack and
-     * context. */
     asm volatile (
-        "stmfd  sp!, { r4-r11, lr }  \n" /* Stack all non-volatile context on current core */
-        "ldr    r2, =idle_stacks     \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
-        "ldr    r2, [r2, r0, lsl #2] \n"
-        "add    r2, r2, %0*4         \n"
-        "stmfd  r2!, { sp }          \n" /* save original stack pointer on idle stack */
-        "mov    sp, r2               \n" /* switch stacks */
-        "adr    r2, 1f               \n" /* r2 = new core restart address */
-        "str    r2, [r1, #40]        \n" /* thread->context.start = r2 */
-        "ldr    pc, =switch_thread   \n" /* r0 = thread after call - see load_context */
-    "1:                              \n"
-        "ldr    sp, [r0, #32]        \n" /* Reload original sp from context structure */
-        "mov    r1, #0               \n" /* Clear start address */
-        "str    r1, [r0, #40]        \n"
-        "bl     commit_discard_idcache \n" /* Invalidate new core's cache */
-        "ldmfd  sp!, { r4-r11, pc }  \n" /* Restore non-volatile context to new core and return */
-        : : "i"(IDLE_STACK_WORDS)
-    );
-    (void)core; (void)thread;
+        "stmfd  sp!, { r4-r5, lr }    \n" /* can't use the first two ctx fields */
+        "add    r2, r1, #8            \n"
+        "stmia  r2, { r6-r11, sp }    \n" /* save remaining context */
+        "adr    r2, .new_core_restart \n" /* save context ptr + restart address */
+        "str    r2, [r1, #40]         \n" /* make 'start' non-null */
+        "stmia  r1, { r1-r2 }         \n"
+        "ldr    r2, =idle_stacks      \n" /* switch to idle stack on old core */
+        "ldr    sp, [r2, r0, lsl #2]  \n"
+        "add    sp, sp, %0*4          \n"
+        "stmfd  sp!, { r0-r1 }        \n"
+        "bl     commit_dcache         \n" /* write back everything */
+        "ldmfd  sp!, { r0-r1 }        \n"
+        "b      switch_core_final     \n"
+    ".new_core_restart:               \n"
+        "mov    r1, #0                \n" /* mark as started */
+        "str    r1, [r0, #40]         \n"
+        "add    r0, r0, #8            \n"
+        "ldmia  r0, { r6-r11, sp }    \n" /* restore non-volatiles and stack */
+        "bl     commit_discard_idcache \n" /* invalidate new core's cache */
+        "ldmfd  sp!, { r4-r5, pc }    \n" /* restore remaining context */
+        : : "i"(IDLE_STACK_WORDS));
+    (void)old_core; (void)thread;
 }
 
 /** PP-model-specific dual-core code **/
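The fixed offsets used in both hunks (#8, #32, #40) only make sense against the thread context layout. A sketch consistent with those offsets follows; the real definition lives in the ARM threading code, not in this file, so treat the names as assumptions:

#include <stdint.h>

/* Context layout implied by the offsets in the asm above (a sketch).
 * r[0..7] sit at offsets 0..28, sp at 32, lr at 36, start at 40. */
struct regs
{
    uint32_t r[8];   /*  +0: r4-r11; r[0]/r[1] are repurposed here */
    uint32_t sp;     /* +32: stack pointer */
    uint32_t lr;     /* +36: return address */
    uint32_t start;  /* +40: restart address; zeroed once started */
};

This is why the prologue only stacks r4-r5: "stmia r1, { r1-r2 }" drops the context pointer and the restart address into the r4/r5 slots, so those two fields are unavailable for their usual contents, while r6-r11 and sp are saved at offset 8 and reloaded from there at .new_core_restart.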