Diffstat (limited to 'firmware/export/thread.h')
-rw-r--r--  firmware/export/thread.h | 126
1 file changed, 19 insertions(+), 107 deletions(-)
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index a26b5962e2..8912283343 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -109,6 +109,23 @@ struct regs
     uint32_t lr;    /* 36 - r14 (lr) */
     uint32_t start; /* 40 - Thread start address, or NULL when started */
 };
+
+#ifdef CPU_PP
+#ifdef HAVE_CORELOCK_OBJECT
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+    volatile unsigned char myl[NUM_CORES];
+    volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#endif /* HAVE_CORELOCK_OBJECT */
+#endif /* CPU_PP */
 #elif defined(CPU_MIPS)
 struct regs
 {
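The hunk above only declares the corelock API; the definitions live elsewhere in the firmware. As a rough illustration of what Peterson's algorithm looks like behind these declarations, here is a minimal sketch, assuming NUM_CORES == 2 and a CURRENT_CORE macro that yields 0 or 1 for the executing core (both come from the Rockbox build configuration; this is not the firmware's actual implementation):

    /* Sketch only: classic two-party Peterson lock over the struct above. */
    void corelock_lock(struct corelock *cl)
    {
        const unsigned int core = CURRENT_CORE;  /* assumed: 0 or 1 */
        const unsigned int othercore = 1 - core;

        cl->myl[core] = 1;      /* this core wants the lock */
        cl->turn = othercore;   /* concede the tie-break */

        /* Spin while the other core wants the lock and holds the tie-break.
         * Real hardware also needs the two stores above to become visible
         * in order; volatile alone does not guarantee that on every CPU. */
        while (cl->myl[othercore] != 0 && cl->turn == othercore)
            ;
    }

    void corelock_unlock(struct corelock *cl)
    {
        cl->myl[CURRENT_CORE] = 0;  /* withdraw the claim; releases a waiter */
    }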
@@ -162,26 +179,13 @@ struct thread_list
     struct thread_entry *next; /* Next thread in a list */
 };
 
-/* Small objects for core-wise mutual exclusion */
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* No reliable atomic instruction available - use Peterson's algorithm */
-struct corelock
-{
-    volatile unsigned char myl[NUM_CORES];
-    volatile unsigned char turn;
-} __attribute__((packed));
-
-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
-#else
+#ifndef HAVE_CORELOCK_OBJECT
 /* No atomic corelock op needed or just none defined */
 #define corelock_init(cl)
 #define corelock_lock(cl)
 #define corelock_try_lock(cl)
 #define corelock_unlock(cl)
-#endif /* core locking selection */
+#endif /* HAVE_CORELOCK_OBJECT */
 
 #ifdef HAVE_PRIORITY_SCHEDULING
 struct blocker
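With the single HAVE_CORELOCK_OBJECT switch, calling code stays unconditional: on single-core builds the corelock_*() calls expand to nothing, so even the lock object itself can be compiled out. A hedged usage sketch (the shared_counter type is hypothetical; IF_COP() is the existing Rockbox helper that keeps its arguments only when NUM_CORES > 1):

    /* Hypothetical caller, not from thread.h. */
    struct shared_counter
    {
        long value;
        IF_COP( struct corelock cl; )  /* field exists only on multi-core */
    };

    static void counter_add(struct shared_counter *c, long n)
    {
        corelock_lock(&c->cl);   /* empty macro on single-core builds */
        c->value += n;
        corelock_unlock(&c->cl);
    }

Because the no-op variants are function-like macros that discard their argument, &c->cl is never evaluated on single-core builds, so the missing field is harmless.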
@@ -341,98 +345,6 @@ struct core_entry
 #define IFN_PRIO(...) __VA_ARGS__
 #endif
 
-/* Macros generate better code than an inline function in this case */
-#if defined (CPU_ARM)
-/* atomic */
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define test_and_set(a, v, cl) \
-        xchg8((a), (v), (cl))
-/* atomic */
-#define xchg8(a, v, cl) \
-({ uint32_t o;            \
-   corelock_lock(cl);     \
-   o = *(uint8_t *)(a);   \
-   *(uint8_t *)(a) = (v); \
-   corelock_unlock(cl);   \
-   o; })
-#define xchg32(a, v, cl) \
-({ uint32_t o;             \
-   corelock_lock(cl);      \
-   o = *(uint32_t *)(a);   \
-   *(uint32_t *)(a) = (v); \
-   corelock_unlock(cl);    \
-   o; })
-#define xchgptr(a, v, cl) \
-({ typeof (*(a)) o;     \
-   corelock_lock(cl);   \
-   o = *(a);            \
-   *(a) = (v);          \
-   corelock_unlock(cl); \
-   o; })
-#endif /* locking selection */
-#elif defined (CPU_COLDFIRE)
-/* atomic */
-/* one branch will be optimized away if v is a constant expression */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = 0;                \
-   if (v) {                       \
-       asm volatile (             \
-           "bset.b #0, (%0)"      \
-           : : "a"((uint8_t*)(a)) \
-           : "cc");               \
-   } else {                       \
-       asm volatile (             \
-           "bclr.b #0, (%0)"      \
-           : : "a"((uint8_t*)(a)) \
-           : "cc");               \
-   }                              \
-   asm volatile ("sne.b %0"       \
-                 : "+d"(o));      \
-   o; })
-#elif CONFIG_CPU == SH7034
-/* atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o;                  \
-   asm volatile (               \
-       "tas.b @%2     \n"       \
-       "mov   #-1, %0 \n"       \
-       "negc  %0, %0  \n"       \
-       : "=r"(o)                \
-       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
-         "r"((uint8_t *)(a)));  \
-   o; })
-#endif /* CONFIG_CPU == */
-
-/* defaults for no asm version */
-#ifndef test_and_set
-/* not atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* test_and_set */
-#ifndef xchg8
-/* not atomic */
-#define xchg8(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
-   *(uint8_t *)(a) = (v);        \
-   o; })
-#endif /* xchg8 */
-#ifndef xchg32
-/* not atomic */
-#define xchg32(a, v, ...) \
-({ uint32_t o = *(uint32_t *)(a); \
-   *(uint32_t *)(a) = (v);        \
-   o; })
-#endif /* xchg32 */
-#ifndef xchgptr
-/* not atomic */
-#define xchgptr(a, v, ...) \
-({ typeof (*(a)) o = *(a); \
-   *(a) = (v);             \
-   o; })
-#endif /* xchgptr */
-
 void core_idle(void);
 void core_wake(IF_COP_VOID(unsigned int core));
 
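The deleted macros either had a truly atomic instruction behind them (ColdFire bset/bclr plus sne, SH7034 tas.b) or, on SW_CORELOCK ARM targets, simply wrapped a plain load/store in a corelock. After this change, code that used test_and_set() as a lock flag can take the corelock directly instead. A hedged before/after sketch with hypothetical names, not code from this commit:

    /* Before (removed API): spin on the old value of a byte flag;
     * the corelock argument only arbitrated the non-atomic ARM path.
     *
     *     while (test_and_set(&lk->locked, 1, &lk->cl) != 0)
     *         ;
     *     ...critical section...
     *     lk->locked = 0;
     *
     * After: the corelock itself is the lock, so the byte flag and the
     * per-CPU macro layer disappear. */
    struct spinlock_sketch
    {
        struct corelock cl;  /* call corelock_init(&l->cl) before first use */
    };

    static inline void spinlock_sketch_lock(struct spinlock_sketch *l)
    {
        corelock_lock(&l->cl);   /* Peterson lock provides the exclusion */
    }

    static inline void spinlock_sketch_unlock(struct spinlock_sketch *l)
    {
        corelock_unlock(&l->cl);
    }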