summaryrefslogtreecommitdiff
path: root/firmware/target/arm/pp
diff options
context:
space:
mode:
authorThomas Martitz <kugel@rockbox.org>2013-12-04 17:06:17 +0100
committerThomas Martitz <kugel@rockbox.org>2014-03-03 18:11:57 +0100
commit382d1861af12741af4ff235b9d18f179c0adc4c5 (patch)
tree26166c130d2889bb1ae1082e8f7aba103534f49e /firmware/target/arm/pp
parent8bae5f2644b5d5759499fbf1066b9c35c6f859ad (diff)
downloadrockbox-382d1861af12741af4ff235b9d18f179c0adc4c5.tar.gz
rockbox-382d1861af12741af4ff235b9d18f179c0adc4c5.zip
kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and to use different implementations. Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
Diffstat (limited to 'firmware/target/arm/pp')
-rw-r--r--firmware/target/arm/pp/debug-pp.c3
-rw-r--r--firmware/target/arm/pp/thread-pp.c122
2 files changed, 2 insertions, 123 deletions
diff --git a/firmware/target/arm/pp/debug-pp.c b/firmware/target/arm/pp/debug-pp.c
index 2f57e1ef14..9e0dcad5f9 100644
--- a/firmware/target/arm/pp/debug-pp.c
+++ b/firmware/target/arm/pp/debug-pp.c
@@ -19,9 +19,10 @@
19 * 19 *
20 ****************************************************************************/ 20 ****************************************************************************/
21 21
22#include <stdbool.h>
22#include "config.h" 23#include "config.h"
23#include "system.h" 24#include "system.h"
24#include <stdbool.h> 25#include "kernel.h"
25#include "font.h" 26#include "font.h"
26#include "lcd.h" 27#include "lcd.h"
27#include "button.h" 28#include "button.h"
diff --git a/firmware/target/arm/pp/thread-pp.c b/firmware/target/arm/pp/thread-pp.c
index 0836b27204..ed4bdbeac1 100644
--- a/firmware/target/arm/pp/thread-pp.c
+++ b/firmware/target/arm/pp/thread-pp.c
@@ -51,128 +51,6 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
51 [COP] = cop_idlestackbegin 51 [COP] = cop_idlestackbegin
52}; 52};
53 53
54/* Core locks using Peterson's mutual exclusion algorithm */
55
56/*---------------------------------------------------------------------------
57 * Initialize the corelock structure.
58 *---------------------------------------------------------------------------
59 */
60void corelock_init(struct corelock *cl)
61{
62 memset(cl, 0, sizeof (*cl));
63}
64
65#if 1 /* Assembly locks to minimize overhead */
66/*---------------------------------------------------------------------------
67 * Wait for the corelock to become free and acquire it when it does.
68 *---------------------------------------------------------------------------
69 */
70void __attribute__((naked)) corelock_lock(struct corelock *cl)
71{
72 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
73 asm volatile (
74 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
75 "ldrb r1, [r1] \n"
76 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
77 "eor r2, r1, #0xff \n" /* r2 = othercore */
78 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
79 "1: \n"
80 "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
81 "cmp r3, #0 \n" /* yes? lock acquired */
82 "bxeq lr \n"
83 "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
84 "cmp r3, r1 \n"
85 "bxeq lr \n" /* yes? lock acquired */
86 "b 1b \n" /* keep trying */
87 : : "i"(&PROCESSOR_ID)
88 );
89 (void)cl;
90}
91
92/*---------------------------------------------------------------------------
93 * Try to acquire the corelock. If free, the caller gets it; otherwise return 0.
94 *---------------------------------------------------------------------------
95 */
96int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
97{
98 /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
99 asm volatile (
100 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
101 "ldrb r1, [r1] \n"
102 "mov r3, r0 \n"
103 "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
104 "eor r2, r1, #0xff \n" /* r2 = othercore */
105 "strb r2, [r0, #2] \n" /* cl->turn = othercore */
106 "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
107 "eors r0, r0, r2 \n" /* yes? lock acquired */
108 "bxne lr \n"
109 "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
110 "ands r0, r0, r1 \n"
111 "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
112 "bx lr \n" /* return result */
113 : : "i"(&PROCESSOR_ID)
114 );
115
116 return 0;
117 (void)cl;
118}
119
120/*---------------------------------------------------------------------------
121 * Release ownership of the corelock
122 *---------------------------------------------------------------------------
123 */
124void __attribute__((naked)) corelock_unlock(struct corelock *cl)
125{
126 asm volatile (
127 "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
128 "ldrb r1, [r1] \n"
129 "mov r2, #0 \n" /* cl->myl[core] = 0 */
130 "strb r2, [r0, r1, lsr #7] \n"
131 "bx lr \n"
132 : : "i"(&PROCESSOR_ID)
133 );
134 (void)cl;
135}
136
137#else /* C versions for reference */
138
139void corelock_lock(struct corelock *cl)
140{
141 const unsigned int core = CURRENT_CORE;
142 const unsigned int othercore = 1 - core;
143
144 cl->myl[core] = core;
145 cl->turn = othercore;
146
147 for (;;)
148 {
149 if (cl->myl[othercore] == 0 || cl->turn == core)
150 break;
151 }
152}
153
154int corelock_try_lock(struct corelock *cl)
155{
156 const unsigned int core = CURRENT_CORE;
157 const unsigned int othercore = 1 - core;
158
159 cl->myl[core] = core;
160 cl->turn = othercore;
161
162 if (cl->myl[othercore] == 0 || cl->turn == core)
163 {
164 return 1;
165 }
166
167 cl->myl[core] = 0;
168 return 0;
169}
170
171void corelock_unlock(struct corelock *cl)
172{
173 cl->myl[CURRENT_CORE] = 0;
174}
175#endif /* ASM / C selection */
176 54
177/*--------------------------------------------------------------------------- 55/*---------------------------------------------------------------------------
178 * Do any device-specific inits for the threads and synchronize the kernel 56 * Do any device-specific inits for the threads and synchronize the kernel