summaryrefslogtreecommitdiff
path: root/firmware/target/arm/pp/thread-pp.c
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/target/arm/pp/thread-pp.c')
-rw-r--r--  firmware/target/arm/pp/thread-pp.c | 122
1 file changed, 0 insertions(+), 122 deletions(-)
diff --git a/firmware/target/arm/pp/thread-pp.c b/firmware/target/arm/pp/thread-pp.c
index 0836b27204..ed4bdbeac1 100644
--- a/firmware/target/arm/pp/thread-pp.c
+++ b/firmware/target/arm/pp/thread-pp.c
@@ -51,128 +51,6 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
51 [COP] = cop_idlestackbegin 51 [COP] = cop_idlestackbegin
52}; 52};
53 53
54/* Core locks using Peterson's mutual exclusion algorithm */
55
56/*---------------------------------------------------------------------------
57 * Initialize the corelock structure.
58 *---------------------------------------------------------------------------
59 */
60void corelock_init(struct corelock *cl)
61{
62 memset(cl, 0, sizeof (*cl));
63}
64
65#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
 * Wait for the corelock to become free and acquire it when it does.
 *
 * Peterson's algorithm: declare interest (myl[core] = core), hand the turn
 * to the other core, then spin until either the other core shows no
 * interest or the turn has come back to us.
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa):
     * id >> 7 is 0 or 1, so the id doubles as the index into cl->myl[]. */
    asm volatile (
        "mov r1, %0                \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1]             \n"
        "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff         \n" /* r2 = othercore */
        "strb r2, [r0, #2]         \n" /* cl->turn = othercore */
    "1:                            \n"
        "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
        "cmp r3, #0                \n" /* yes? lock acquired */
        "bxeq lr                   \n"
        "ldrb r3, [r0, #2]         \n" /* || cl->turn == core ? */
        "cmp r3, r1                \n"
        "bxeq lr                   \n" /* yes? lock acquired */
        "b 1b                      \n" /* keep trying */
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl; /* naked function: cl arrives in r0 and is consumed by the asm */
}
91
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
 *
 * Single-shot Peterson's probe: declare interest, yield the turn, test
 * once.  Success returns a nonzero value in r0 (the other core's id, or
 * our id from the turn check); on failure the interest flag is cleared
 * again and 0 is returned.
 *---------------------------------------------------------------------------
 */
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
    /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
    asm volatile (
        "mov r1, %0                  \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1]               \n"
        "mov r3, r0                  \n" /* keep cl in r3; r0 becomes result */
        "strb r1, [r0, r1, lsr #7]   \n" /* cl->myl[core] = core */
        "eor r2, r1, #0xff           \n" /* r2 = othercore */
        "strb r2, [r0, #2]           \n" /* cl->turn = othercore */
        "ldrb r0, [r3, r2, lsr #7]   \n" /* cl->myl[othercore] == 0 ? */
        "eors r0, r0, r2             \n" /* yes? lock acquired (r0 nonzero) */
        "bxne lr                     \n"
        "ldrb r0, [r3, #2]           \n" /* || cl->turn == core? */
        "ands r0, r0, r1             \n" /* r0 = core if our turn, else 0 */
        "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
        "bx lr                       \n" /* return result */
        : : "i"(&PROCESSOR_ID)
    );

    return 0; /* never reached; satisfies the compiler for a naked int fn */
    (void)cl; /* cl arrives in r0 and is consumed by the asm */
}
119
/*---------------------------------------------------------------------------
 * Release ownership of the corelock
 *
 * Peterson's release is just clearing our own interest flag.
 *---------------------------------------------------------------------------
 */
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
    asm volatile (
        "mov r1, %0                \n" /* r1 = PROCESSOR_ID */
        "ldrb r1, [r1]             \n"
        "mov r2, #0                \n" /* cl->myl[core] = 0 */
        "strb r2, [r0, r1, lsr #7] \n"
        "bx lr                     \n"
        : : "i"(&PROCESSOR_ID)
    );
    (void)cl; /* naked function: cl arrives in r0 and is consumed by the asm */
}
136
137#else /* C versions for reference */
138
139void corelock_lock(struct corelock *cl)
140{
141 const unsigned int core = CURRENT_CORE;
142 const unsigned int othercore = 1 - core;
143
144 cl->myl[core] = core;
145 cl->turn = othercore;
146
147 for (;;)
148 {
149 if (cl->myl[othercore] == 0 || cl->turn == core)
150 break;
151 }
152}
153
154int corelock_try_lock(struct corelock *cl)
155{
156 const unsigned int core = CURRENT_CORE;
157 const unsigned int othercore = 1 - core;
158
159 cl->myl[core] = core;
160 cl->turn = othercore;
161
162 if (cl->myl[othercore] == 0 || cl->turn == core)
163 {
164 return 1;
165 }
166
167 cl->myl[core] = 0;
168 return 0;
169}
170
171void corelock_unlock(struct corelock *cl)
172{
173 cl->myl[CURRENT_CORE] = 0;
174}
175#endif /* ASM / C selection */
176 54
177/*--------------------------------------------------------------------------- 55/*---------------------------------------------------------------------------
178 * Do any device-specific inits for the threads and synchronize the kernel 56 * Do any device-specific inits for the threads and synchronize the kernel