Diffstat (limited to 'firmware/export')
-rw-r--r--  firmware/export/config.h   53
-rw-r--r--  firmware/export/i2c-pp.h    4
-rw-r--r--  firmware/export/kernel.h  103
-rw-r--r--  firmware/export/pp5002.h    2
-rw-r--r--  firmware/export/pp5020.h   15
-rw-r--r--  firmware/export/system.h    4
-rw-r--r--  firmware/export/thread.h  552
7 files changed, 591 insertions, 142 deletions
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 46d4336e70..46c4d3dfd2 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -282,9 +282,13 @@
282#define HAVE_EXTENDED_MESSAGING_AND_NAME 282#define HAVE_EXTENDED_MESSAGING_AND_NAME
283#endif 283#endif
284 284
285#if (CONFIG_CODEC == SWCODEC) && !defined(SIMULATOR) && !defined(BOOTLOADER) 285#if (CONFIG_CODEC == SWCODEC) && !defined(BOOTLOADER)
286#ifndef SIMULATOR
286#define HAVE_PRIORITY_SCHEDULING 287#define HAVE_PRIORITY_SCHEDULING
287#define HAVE_SCHEDULER_BOOSTCTRL 288#define HAVE_SCHEDULER_BOOSTCTRL
289#endif /* SIMULATOR */
290#define HAVE_SEMAPHORE_OBJECTS
291#define HAVE_EVENT_OBJECTS
288#endif 292#endif
289 293
290/* define for all cpus from SH family */ 294/* define for all cpus from SH family */
@@ -363,31 +367,70 @@
363#define IRAM_LCDFRAMEBUFFER 367#define IRAM_LCDFRAMEBUFFER
364#endif 368#endif
365 369
370/* Change this if you want to build a single-core firmware for a multicore
371 * target for debugging */
372#if defined(BOOTLOADER)
373#define FORCE_SINGLE_CORE
374#endif
375
376/* Core locking types - specifies type of atomic operation */
377#define CORELOCK_NONE 0
378#define SW_CORELOCK 1 /* Mutual exclusion provided by a software algorithm
379 and not a special semaphore instruction */
380#define CORELOCK_SWAP 2 /* A swap (exchange) instruction */
381
366/* Dual core support - not yet working on the 1G/2G and 3G iPod */ 382/* Dual core support - not yet working on the 1G/2G and 3G iPod */
367#if defined(CPU_PP) 383#if defined(CPU_PP)
368#define IDLE_STACK_SIZE 0x80 384#define IDLE_STACK_SIZE 0x80
369#define IDLE_STACK_WORDS 0x20 385#define IDLE_STACK_WORDS 0x20
370 386
371#if !defined(BOOTLOADER) && CONFIG_CPU != PP5002 387#if !defined(FORCE_SINGLE_CORE) && CONFIG_CPU != PP5002
388
372#define NUM_CORES 2 389#define NUM_CORES 2
373#define CURRENT_CORE current_core() 390#define CURRENT_CORE current_core()
374/* Hopefully at some point we will learn how to mark areas of main memory as 391/* Use IRAM for variables shared across cores - large memory buffers should
375 * not to be cached. Until then, use IRAM for variables shared across cores */ 392 * use UNCACHED_ADDR(a) and be appropriately aligned and padded */
376#define NOCACHEBSS_ATTR IBSS_ATTR 393#define NOCACHEBSS_ATTR IBSS_ATTR
377#define NOCACHEDATA_ATTR IDATA_ATTR 394#define NOCACHEDATA_ATTR IDATA_ATTR
378 395
379#define IF_COP(...) __VA_ARGS__ 396#define IF_COP(...) __VA_ARGS__
397#define IF_COP_VOID(...) __VA_ARGS__
398#define IF_COP_CORE(core) core
399
400#if CONFIG_CPU == PP5020
401#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
402#else
403#define CONFIG_CORELOCK CORELOCK_SWAP
404#endif
405
380#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */ 406#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
407
381#endif /* CPU_PP */ 408#endif /* CPU_PP */
382 409
410#ifndef CONFIG_CORELOCK
411#define CONFIG_CORELOCK CORELOCK_NONE
412#endif
413
414#if CONFIG_CORELOCK == SW_CORELOCK
415#define IF_SWCL(...) __VA_ARGS__
416#define IFN_SWCL(...)
417#else
418#define IF_SWCL(...)
419#define IFN_SWCL(...) __VA_ARGS__
420#endif /* CONFIG_CORELOCK == */
421
383#ifndef NUM_CORES 422#ifndef NUM_CORES
384/* Default to single core */ 423/* Default to single core */
385#define NUM_CORES 1 424#define NUM_CORES 1
386#define CURRENT_CORE CPU 425#define CURRENT_CORE CPU
387#define NOCACHEBSS_ATTR 426#define NOCACHEBSS_ATTR
388#define NOCACHEDATA_ATTR 427#define NOCACHEDATA_ATTR
428#define CONFIG_CORELOCK CORELOCK_NONE
389 429
390#define IF_COP(...) 430#define IF_COP(...)
431#define IF_COP_VOID(...) void
432#define IF_COP_CORE(core) CURRENT_CORE
433
391#endif /* NUM_CORES */ 434#endif /* NUM_CORES */
392 435
393#endif /* __CONFIG_H__ */ 436#endif /* __CONFIG_H__ */
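
The IF_COP()/IF_COP_VOID()/IF_COP_CORE() macros defined above let one prototype serve both single-core and dual-core builds. A minimal sketch, assuming the core_wake() prototype added to thread.h by this same commit and cores numbered 0 and 1; the wake-the-other-core logic is illustrative only:

    #include "config.h"
    #include "thread.h"   /* declares core_wake(IF_COP_VOID(unsigned int core)) */

    static void wake_peer(void)
    {
    #if NUM_CORES > 1
        core_wake(1 - CURRENT_CORE);   /* dual-core: name the other core explicitly */
    #else
        core_wake();                   /* single-core: IF_COP_VOID() made the prototype void */
    #endif
    }
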
diff --git a/firmware/export/i2c-pp.h b/firmware/export/i2c-pp.h
index 3048acbaba..908db22554 100644
--- a/firmware/export/i2c-pp.h
+++ b/firmware/export/i2c-pp.h
@@ -45,6 +45,10 @@
45 45
46/* TODO: Fully implement i2c driver */ 46/* TODO: Fully implement i2c driver */
47 47
48/* To be used by drivers that need to do multiple i2c operations
49 atomically */
50extern struct spinlock i2c_spin;
51
48void i2c_init(void); 52void i2c_init(void);
49int i2c_readbyte(unsigned int dev_addr, int addr); 53int i2c_readbyte(unsigned int dev_addr, int addr);
50int pp_i2c_send(unsigned int addr, int data0, int data1); 54int pp_i2c_send(unsigned int addr, int data0, int data1);
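
A small sketch of what the exported i2c_spin is meant for: grouping several bus operations into one atomic transaction. The device address 0x10 and the register pairing are made up for illustration; spinlock_lock()/spinlock_unlock() come from the kernel.h changes below.

    #include "kernel.h"
    #include "i2c-pp.h"

    static void codec_write_pair(int reg0, int val0, int reg1, int val1)
    {
        spinlock_lock(&i2c_spin);          /* keep both writes in one transaction */
        pp_i2c_send(0x10, reg0, val0);
        pp_i2c_send(0x10, reg1, val1);
        spinlock_unlock(&i2c_spin);
    }
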
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 3d70e49a4c..a72e004b33 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -23,6 +23,8 @@
23#include <inttypes.h> 23#include <inttypes.h>
24#include "config.h" 24#include "config.h"
25 25
26#include "thread.h"
27
26/* wrap-safe macros for tick comparison */ 28/* wrap-safe macros for tick comparison */
27#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0) 29#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
28#define TIME_BEFORE(a,b) TIME_AFTER(b,a) 30#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
@@ -31,6 +33,7 @@
31 33
32#define MAX_NUM_TICK_TASKS 8 34#define MAX_NUM_TICK_TASKS 8
33 35
36#define MAX_NUM_QUEUES 32
34#define QUEUE_LENGTH 16 /* MUST be a power of 2 */ 37#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
35#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1) 38#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
36 39
@@ -72,7 +75,7 @@
72#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0) 75#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
73#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1) 76#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
74 77
75struct event 78struct queue_event
76{ 79{
77 long id; 80 long id;
78 intptr_t data; 81 intptr_t data;
@@ -91,20 +94,66 @@ struct queue_sender_list
91 94
92struct event_queue 95struct event_queue
93{ 96{
94 struct event events[QUEUE_LENGTH]; 97 struct thread_queue queue; /* Waiter list */
95 struct thread_entry *thread; 98 struct queue_event events[QUEUE_LENGTH]; /* list of events */
96 unsigned int read; 99 unsigned int read; /* head of queue */
97 unsigned int write; 100 unsigned int write; /* tail of queue */
98#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 101#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
99 struct queue_sender_list *send; 102 struct queue_sender_list *send; /* list of threads waiting for
103 reply to an event */
104#endif
105#if NUM_CORES > 1
106 struct corelock cl; /* inter-core sync */
100#endif 107#endif
101}; 108};
102 109
103struct mutex 110struct mutex
104{ 111{
105 uint32_t locked; 112 struct thread_entry *queue; /* Waiter list */
106 struct thread_entry *thread; 113#if CONFIG_CORELOCK == SW_CORELOCK
114 struct corelock cl; /* inter-core sync */
115#endif
116 struct thread_entry *thread; /* thread that owns lock */
117 int count; /* lock owner recursion count */
118 unsigned char locked; /* locked semaphore */
119};
120
121struct spinlock
122{
123#if NUM_CORES > 1
124 struct corelock cl; /* inter-core sync */
125#endif
126 struct thread_entry *thread; /* lock owner */
127 int count; /* lock owner recursion count */
128 unsigned char locked; /* is locked if nonzero */
129#if NUM_CORES > 1
130 unsigned char task_switch; /* can task switch? */
131#endif
132};
133
134#ifdef HAVE_SEMAPHORE_OBJECTS
135struct semaphore
136{
137 struct thread_entry *queue; /* Waiter list */
138#if CONFIG_CORELOCK == SW_CORELOCK
139 struct corelock cl; /* inter-core sync */
140#endif
141 int count; /* # of waits remaining before unsignaled */
142 int max; /* maximum # of waits to remain signaled */
143};
144#endif
145
146#ifdef HAVE_EVENT_OBJECTS
147struct event
148{
149 struct thread_entry *queues[2]; /* waiters for each state */
150#if CONFIG_CORELOCK == SW_CORELOCK
151 struct corelock cl; /* inter-core sync */
152#endif
153 unsigned char automatic; /* event performs auto-reset */
154 unsigned char state; /* state: 1 = signaled */
107}; 155};
156#endif
108 157
109/* global tick variable */ 158/* global tick variable */
110#if defined(CPU_PP) && defined(BOOTLOADER) 159#if defined(CPU_PP) && defined(BOOTLOADER)
@@ -127,6 +176,7 @@ extern void yield(void);
127extern void sleep(int ticks); 176extern void sleep(int ticks);
128int tick_add_task(void (*f)(void)); 177int tick_add_task(void (*f)(void));
129int tick_remove_task(void (*f)(void)); 178int tick_remove_task(void (*f)(void));
179extern void tick_start(unsigned int interval_in_ms);
130 180
131struct timeout; 181struct timeout;
132 182
@@ -150,10 +200,17 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
150 int ticks, intptr_t data); 200 int ticks, intptr_t data);
151void timeout_cancel(struct timeout *tmo); 201void timeout_cancel(struct timeout *tmo);
152 202
203#define STATE_NONSIGNALED 0
204#define STATE_SIGNALED 1
205
206#define WAIT_TIMEDOUT (-1)
207#define WAIT_SUCCEEDED 1
208
153extern void queue_init(struct event_queue *q, bool register_queue); 209extern void queue_init(struct event_queue *q, bool register_queue);
154extern void queue_delete(struct event_queue *q); 210extern void queue_delete(struct event_queue *q);
155extern void queue_wait(struct event_queue *q, struct event *ev); 211extern void queue_wait(struct event_queue *q, struct queue_event *ev);
156extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks); 212extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
213 int ticks);
157extern void queue_post(struct event_queue *q, long id, intptr_t data); 214extern void queue_post(struct event_queue *q, long id, intptr_t data);
158#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 215#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
159extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send); 216extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
@@ -168,14 +225,26 @@ extern int queue_count(const struct event_queue *q);
168extern int queue_broadcast(long id, intptr_t data); 225extern int queue_broadcast(long id, intptr_t data);
169 226
170extern void mutex_init(struct mutex *m); 227extern void mutex_init(struct mutex *m);
171static inline void spinlock_init(struct mutex *m)
172{ mutex_init(m); } /* Same thing for now */
173extern void mutex_lock(struct mutex *m); 228extern void mutex_lock(struct mutex *m);
174extern void mutex_unlock(struct mutex *m); 229extern void mutex_unlock(struct mutex *m);
175extern void spinlock_lock(struct mutex *m); 230#define SPINLOCK_TASK_SWITCH 0x10
176extern void spinlock_unlock(struct mutex *m); 231#define SPINLOCK_NO_TASK_SWITCH 0x00
177extern void tick_start(unsigned int interval_in_ms); 232extern void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags));
178 233extern void spinlock_lock(struct spinlock *l);
234extern void spinlock_unlock(struct spinlock *l);
235extern int spinlock_lock_w_tmo(struct spinlock *l, int ticks);
236#ifdef HAVE_SEMAPHORE_OBJECTS
237extern void semaphore_init(struct semaphore *s, int max, int start);
238extern void semaphore_wait(struct semaphore *s);
239extern void semaphore_release(struct semaphore *s);
240#endif /* HAVE_SEMAPHORE_OBJECTS */
241#ifdef HAVE_EVENT_OBJECTS
242#define EVENT_AUTOMATIC 0x10
243#define EVENT_MANUAL 0x00
244extern void event_init(struct event *e, unsigned int flags);
245extern void event_wait(struct event *e, unsigned int for_state);
246extern void event_set_state(struct event *e, unsigned int state);
247#endif /* HAVE_EVENT_OBJECTS */
179#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT) 248#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
180 249
181#endif 250#endif /* _KERNEL_H_ */
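
A hedged sketch of the new semaphore and event objects declared above, assuming HAVE_SEMAPHORE_OBJECTS and HAVE_EVENT_OBJECTS are defined for the target; the producer/consumer pairing and the way the initial event state is folded into the init flags are illustrative, not taken from the commit.

    #include "kernel.h"

    static struct semaphore items_sem;
    static struct event     dma_done;

    static void sync_init(void)
    {
        semaphore_init(&items_sem, 8, 0);   /* at most 8 pending, start empty */
        event_init(&dma_done, EVENT_AUTOMATIC | STATE_NONSIGNALED);
    }

    static void producer(void)
    {
        /* ... queue one item ... */
        semaphore_release(&items_sem);      /* wake one waiter */
    }

    static void consumer(void)
    {
        semaphore_wait(&items_sem);              /* block until an item is queued */
        event_wait(&dma_done, STATE_SIGNALED);   /* then wait for hardware completion */
        /* ... consume the item ... */
    }
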
diff --git a/firmware/export/pp5002.h b/firmware/export/pp5002.h
index b2e02f6174..021c248690 100644
--- a/firmware/export/pp5002.h
+++ b/firmware/export/pp5002.h
@@ -139,6 +139,8 @@
139#define CPU_CTL (*(volatile unsigned char *)(0xcf004054)) 139#define CPU_CTL (*(volatile unsigned char *)(0xcf004054))
140#define COP_CTL (*(volatile unsigned char *)(0xcf004058)) 140#define COP_CTL (*(volatile unsigned char *)(0xcf004058))
141 141
142#define PROC_CTL(core) ((&CPU_CTL)[(core)*4])
143
142#define PROC_SLEEP 0xca 144#define PROC_SLEEP 0xca
143#define PROC_WAKE 0xce 145#define PROC_WAKE 0xce
144 146
diff --git a/firmware/export/pp5020.h b/firmware/export/pp5020.h
index 5654a7de63..b591bce695 100644
--- a/firmware/export/pp5020.h
+++ b/firmware/export/pp5020.h
@@ -34,11 +34,15 @@
34/* Each processor has two mailboxes it can write to and two which 34/* Each processor has two mailboxes it can write to and two which
35 it can read from. We define the first to be for sending messages 35 it can read from. We define the first to be for sending messages
36 and the second for replying to messages */ 36 and the second for replying to messages */
37#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000)) 37#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
38#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004)) 38#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
39#define CPU_REPLY (*(volatile unsigned long *)(0x60001008)) 39#define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
40#define COP_REPLY (*(volatile unsigned long *)(0x6000100c)) 40#define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
41#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010)) 41#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))
42
43/* Simple convenient array-like access */
44#define PROC_MESSAGE(core) ((&CPU_MESSAGE)[core])
45#define PROC_REPLY(core) ((&CPU_REPLY)[core])
42 46
43/* Interrupts */ 47/* Interrupts */
44#define CPU_INT_STAT (*(volatile unsigned long*)(0x60004000)) 48#define CPU_INT_STAT (*(volatile unsigned long*)(0x60004000))
@@ -142,6 +146,7 @@
142/* Processors Control */ 146/* Processors Control */
143#define CPU_CTL (*(volatile unsigned long *)(0x60007000)) 147#define CPU_CTL (*(volatile unsigned long *)(0x60007000))
144#define COP_CTL (*(volatile unsigned long *)(0x60007004)) 148#define COP_CTL (*(volatile unsigned long *)(0x60007004))
149#define PROC_CTL(core) ((&CPU_CTL)[core])
145 150
146#define PROC_SLEEP 0x80000000 151#define PROC_SLEEP 0x80000000
147#define PROC_WAIT 0x40000000 152#define PROC_WAIT 0x40000000
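
The PROC_*() accessors added above simply index the existing per-core registers by core number. A minimal sketch; the message value and the sleep call are illustrative only:

    #include "pp5020.h"

    /* PROC_MESSAGE(0) is CPU_MESSAGE, PROC_MESSAGE(1) is COP_MESSAGE */
    static void post_message(unsigned int core, unsigned long msg)
    {
        PROC_MESSAGE(core) = msg;
    }

    /* Same array-like trick for the control registers */
    static void put_core_to_sleep(unsigned int core)
    {
        PROC_CTL(core) = PROC_SLEEP;
    }
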
diff --git a/firmware/export/system.h b/firmware/export/system.h
index 24e1a2d861..dc10c4545f 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -45,6 +45,10 @@ bool detect_original_firmware(void);
45#endif 45#endif
46 46
47#ifdef HAVE_ADJUSTABLE_CPU_FREQ 47#ifdef HAVE_ADJUSTABLE_CPU_FREQ
48#if NUM_CORES > 1
49extern struct spinlock boostctrl_spin;
50#endif
51void cpu_boost_init(void);
48#define FREQ cpu_frequency 52#define FREQ cpu_frequency
49void set_cpu_frequency(long frequency); 53void set_cpu_frequency(long frequency);
50#ifdef CPU_BOOST_LOGGING 54#ifdef CPU_BOOST_LOGGING
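
A rough sketch of why boostctrl_spin is exported here, assuming a simple shared boost reference count; the counter, the CPUFREQ_MAX/CPUFREQ_NORMAL names and the exact locking policy are assumptions for illustration, not part of this header.

    #include <stdbool.h>
    #include "config.h"
    #include "system.h"
    #include "kernel.h"

    static int boost_count NOCACHEBSS_ATTR;   /* shared across cores, so kept uncached */

    static void boost(bool on)
    {
    #if NUM_CORES > 1
        spinlock_lock(&boostctrl_spin);       /* serialise updates from both cores */
    #endif
        boost_count += on ? 1 : -1;
        set_cpu_frequency(boost_count > 0 ? CPUFREQ_MAX : CPUFREQ_NORMAL);
    #if NUM_CORES > 1
        spinlock_unlock(&boostctrl_spin);
    #endif
    }
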
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 7c683ddde5..20cde1a8e3 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -21,6 +21,7 @@
21 21
22#include "config.h" 22#include "config.h"
23#include <inttypes.h> 23#include <inttypes.h>
24#include <stddef.h>
24#include <stdbool.h> 25#include <stdbool.h>
25 26
26/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works 27/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
@@ -31,13 +32,15 @@
31 * can change it own priority to REALTIME to override user interface and 32 * can change it own priority to REALTIME to override user interface and
32 * prevent playback skipping. 33 * prevent playback skipping.
33 */ 34 */
35#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
36#define LOWEST_PRIORITY 100 /* The lowest possible thread priority */
34#define PRIORITY_REALTIME 1 37#define PRIORITY_REALTIME 1
35#define PRIORITY_USER_INTERFACE 4 /* The main thread */ 38#define PRIORITY_USER_INTERFACE 4 /* The main thread */
36#define PRIORITY_RECORDING 4 /* Recording thread */ 39#define PRIORITY_RECORDING 4 /* Recording thread */
37#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */ 40#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
38#define PRIORITY_BUFFERING 4 /* Codec buffering thread */ 41#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
39#define PRIORITY_SYSTEM 6 /* All other firmware threads */ 42#define PRIORITY_SYSTEM 6 /* All other firmware threads */
40#define PRIORITY_BACKGROUND 8 /* Normal application threads */ 43#define PRIORITY_BACKGROUND 8 /* Normal application threads */
41 44
42#if CONFIG_CODEC == SWCODEC 45#if CONFIG_CODEC == SWCODEC
43#define MAXTHREADS 16 46#define MAXTHREADS 16
@@ -47,6 +50,46 @@
47 50
48#define DEFAULT_STACK_SIZE 0x400 /* Bytes */ 51#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
49 52
53/**
54 * "Busy" values that can be swapped into a variable to indicate
55 * that the variable or object pointed to is in use by another processor
56 * core. When accessed, the busy value is swapped-in while the current
57 * value is atomically returned. If the swap returns the busy value,
58 * the processor should retry the operation until some other value is
59 * returned. When modification is finished, the new value should be
60 * written which unlocks it and updates it atomically.
61 *
62 * Procedure:
63 * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
64 *
65 * Modify/examine object at mem location or variable. Create "new_value"
66 * as suitable.
67 *
68 * variable = new_value or curr_value;
69 *
70 * To check a value for busy and perform an operation if not:
71 * curr_value = swap(&variable, BUSY_VALUE);
72 *
73 * if (curr_value != BUSY_VALUE)
74 * {
75 * Modify/examine object at mem location or variable. Create "new_value"
76 * as suitable.
77 * variable = new_value or curr_value;
78 * }
79 * else
80 * {
81 * Do nothing - already busy
82 * }
83 *
84 * Only ever restore when an actual value is returned or else it could leave
85 * the variable locked permanently if another processor unlocked in the
86 * meantime. The next access attempt would deadlock for all processors since
87 * an abandoned busy status would be left behind.
88 */
89#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
90#define STATE_BUSYu8 UINT8_MAX
91#define STATE_BUSYi INT_MIN
92
50#ifndef SIMULATOR 93#ifndef SIMULATOR
51/* Need to keep structures inside the header file because debug_menu 94/* Need to keep structures inside the header file because debug_menu
52 * needs them. */ 95 * needs them. */
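
A hedged sketch of the busy-value protocol described in the comment above, assuming a CORELOCK_SWAP target so that xchgptr() (defined further down in this header) is a single atomic instruction; the shared 'owner' pointer and its update rule are illustrative.

    static struct thread_entry *owner;   /* some pointer shared between cores */

    static void update_owner(struct thread_entry *new_owner)
    {
        struct thread_entry *curr;

        /* Swap in the busy marker until a real value comes back */
        while ((curr = xchgptr(&owner, STATE_BUSYuptr)) == STATE_BUSYuptr);

        /* examine/modify here - other cores spin on the busy value */

        /* Always restore a real value so the slot is never left locked */
        owner = new_owner != NULL ? new_owner : curr;
    }
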
@@ -58,7 +101,7 @@ struct regs
58 unsigned int a[5]; /* 28-44 - a2-a6 */ 101 unsigned int a[5]; /* 28-44 - a2-a6 */
59 void *sp; /* 48 - Stack pointer (a7) */ 102 void *sp; /* 48 - Stack pointer (a7) */
60 void *start; /* 52 - Thread start address, or NULL when started */ 103 void *start; /* 52 - Thread start address, or NULL when started */
61} __attribute__((packed)); 104};
62#elif CONFIG_CPU == SH7034 105#elif CONFIG_CPU == SH7034
63struct regs 106struct regs
64{ 107{
@@ -66,7 +109,7 @@ struct regs
66 void *sp; /* 28 - Stack pointer (r15) */ 109 void *sp; /* 28 - Stack pointer (r15) */
67 void *pr; /* 32 - Procedure register */ 110 void *pr; /* 32 - Procedure register */
68 void *start; /* 36 - Thread start address, or NULL when started */ 111 void *start; /* 36 - Thread start address, or NULL when started */
69} __attribute__((packed)); 112};
70#elif defined(CPU_ARM) 113#elif defined(CPU_ARM)
71struct regs 114struct regs
72{ 115{
@@ -74,7 +117,7 @@ struct regs
74 void *sp; /* 32 - Stack pointer (r13) */ 117 void *sp; /* 32 - Stack pointer (r13) */
75 unsigned int lr; /* 36 - r14 (lr) */ 118 unsigned int lr; /* 36 - r14 (lr) */
76 void *start; /* 40 - Thread start address, or NULL when started */ 119 void *start; /* 40 - Thread start address, or NULL when started */
77} __attribute__((packed)); 120};
78#endif /* CONFIG_CPU */ 121#endif /* CONFIG_CPU */
79#else 122#else
80struct regs 123struct regs
@@ -85,58 +128,206 @@ struct regs
85}; 128};
86#endif /* !SIMULATOR */ 129#endif /* !SIMULATOR */
87 130
88#define STATE_RUNNING 0x00000000 131/* NOTE: The use of the word "queue" may also refer to a linked list of
89#define STATE_BLOCKED 0x20000000 132 threads being maintained that are normally dealt with in FIFO order
90#define STATE_SLEEPING 0x40000000 133 and not necessarily a kernel event_queue */
91#define STATE_BLOCKED_W_TMO 0x60000000 134enum
92 135{
93#define THREAD_STATE_MASK 0x60000000 136 /* States without a timeout must be first */
94#define STATE_ARG_MASK 0x1FFFFFFF 137 STATE_KILLED = 0, /* Thread is killed (default) */
95 138 STATE_RUNNING, /* Thread is currently running */
96#define GET_STATE_ARG(state) (state & STATE_ARG_MASK) 139 STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
97#define GET_STATE(state) (state & THREAD_STATE_MASK) 140 /* These states involve adding the thread to the tmo list */
98#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK))) 141 STATE_SLEEPING, /* Thread is sleeping with a timeout */
99#define CLEAR_STATE_ARG(var) (var &= ~STATE_ARG_MASK) 142 STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
100 143 /* Miscellaneous states */
101#define STATE_BOOSTED 0x80000000 144 STATE_FROZEN, /* Thread is suspended and will not run until
102#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED) 145 thread_thaw is called with its ID */
103#define SET_BOOST_STATE(var) (var |= STATE_BOOSTED) 146 THREAD_NUM_STATES,
104 147 TIMEOUT_STATE_FIRST = STATE_SLEEPING,
105struct thread_entry { 148#if NUM_CORES > 1
106 struct regs context; 149 STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
107 const char *name;
108 void *stack;
109 unsigned long statearg;
110 unsigned short stack_size;
111# if NUM_CORES > 1
112 unsigned char core; /* To which core threads belongs to. */
113# endif
114#ifdef HAVE_PRIORITY_SCHEDULING
115 unsigned char priority;
116 unsigned char priority_x;
117 long last_run;
118#endif 150#endif
119 struct thread_entry *next, *prev; 151};
120#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 152
121 intptr_t retval; 153#if NUM_CORES > 1
154#define THREAD_DESTRUCT ((const char *)0x84905617)
122#endif 155#endif
156
157/* Link information for lists thread is in */
158struct thread_entry; /* forward */
159struct thread_list
160{
161 struct thread_entry *prev; /* Previous thread in a list */
162 struct thread_entry *next; /* Next thread in a list */
123}; 163};
124 164
125struct core_entry { 165/* Small objects for core-wise mutual exclusion */
126 struct thread_entry *running; 166#if CONFIG_CORELOCK == SW_CORELOCK
127 struct thread_entry *sleeping; 167/* No reliable atomic instruction available - use Peterson's algorithm */
128 struct thread_entry *waking; 168struct corelock
129 struct thread_entry **wakeup_list; 169{
170 volatile unsigned char myl[NUM_CORES];
171 volatile unsigned char turn;
172} __attribute__((packed));
173
174void corelock_init(struct corelock *cl);
175void corelock_lock(struct corelock *cl);
176int corelock_try_lock(struct corelock *cl);
177void corelock_unlock(struct corelock *cl);
178#elif CONFIG_CORELOCK == CORELOCK_SWAP
179/* Use native atomic swap/exchange instruction */
180struct corelock
181{
182 unsigned char locked;
183} __attribute__((packed));
184
185#define corelock_init(cl) \
186 ({ (cl)->locked = 0; })
187#define corelock_lock(cl) \
188 ({ while (test_and_set(&(cl)->locked, 1)); })
189#define corelock_try_lock(cl) \
190 ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
191#define corelock_unlock(cl) \
192 ({ (cl)->locked = 0; })
193#else
194/* No atomic corelock op needed or just none defined */
195#define corelock_init(cl)
196#define corelock_lock(cl)
197#define corelock_try_lock(cl)
198#define corelock_unlock(cl)
199#endif /* core locking selection */
200
201struct thread_queue
202{
203 struct thread_entry *queue; /* list of threads waiting -
204 _must_ be first member */
205#if CONFIG_CORELOCK == SW_CORELOCK
206 struct corelock cl; /* lock for atomic list operations */
207#endif
208};
209
210/* Information kept in each thread slot
211 * members are arranged according to size - largest first - in order
212 * to ensure both alignment and packing at the same time.
213 */
214struct thread_entry
215{
216 struct regs context; /* Register context at switch -
217 _must_ be first member */
218 void *stack; /* Pointer to top of stack */
219 const char *name; /* Thread name */
220 long tmo_tick; /* Tick when thread should be woken from
221 timeout */
222 struct thread_list l; /* Links for blocked/waking/running -
223 circular linkage in both directions */
224 struct thread_list tmo; /* Links for timeout list -
225 Self-pointer-terminated in reverse direction,
226 NULL-terminated in forward direction */
227 struct thread_queue *bqp; /* Pointer to list variable in kernel
228 object where thread is blocked - used
229 for implicit unblock and explicit wake */
230#if CONFIG_CORELOCK == SW_CORELOCK
231 struct thread_entry **bqnlp; /* Pointer to list variable in kernel
232 object where thread is blocked - non-locked
233 operations will be used */
234#endif
235 struct thread_entry *queue; /* List of threads waiting for thread to be
236 removed */
237#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
238 intptr_t retval; /* Return value from a blocked operation */
239#endif
240#ifdef HAVE_PRIORITY_SCHEDULING
241 long last_run; /* Last tick when started */
242#endif
243 unsigned short stack_size; /* Size of stack in bytes */
130#ifdef HAVE_PRIORITY_SCHEDULING 244#ifdef HAVE_PRIORITY_SCHEDULING
131 long highest_priority; 245 unsigned char priority; /* Current priority */
246 unsigned char priority_x; /* Inherited priority - right now just a
247 runtime guarantee flag */
132#endif 248#endif
249 unsigned char state; /* Thread slot state (STATE_*) */
133#if NUM_CORES > 1 250#if NUM_CORES > 1
134 volatile bool lock_issued; 251 unsigned char core; /* The core to which thread belongs */
135 volatile bool kernel_running; 252#endif
253#ifdef HAVE_SCHEDULER_BOOSTCTRL
254 unsigned char boosted; /* CPU frequency boost flag */
255#endif
256#if CONFIG_CORELOCK == SW_CORELOCK
257 struct corelock cl; /* Corelock to lock thread slot */
258#endif
259};
260
261#if NUM_CORES > 1
262/* Operations to be performed just before stopping a thread and starting
263 a new one if specified before calling switch_thread */
264#define TBOP_UNLOCK_LIST 0x01 /* Set a pointer variable address var_ptrp */
265#if CONFIG_CORELOCK == CORELOCK_SWAP
266#define TBOP_SET_VARi 0x02 /* Set an int at address var_ip */
267#define TBOP_SET_VARu8 0x03 /* Set an unsigned char at address var_u8p */
268#define TBOP_VAR_TYPE_MASK 0x03 /* Mask for variable type*/
269#endif /* CONFIG_CORELOCK */
270#define TBOP_UNLOCK_CORELOCK 0x04
271#define TBOP_UNLOCK_THREAD 0x08 /* Unlock a thread's slot */
272#define TBOP_UNLOCK_CURRENT 0x10 /* Unlock the current thread's slot */
273#define TBOP_IRQ_LEVEL 0x20 /* Set a new irq level */
274#define TBOP_SWITCH_CORE 0x40 /* Call the core switch preparation routine */
275
276struct thread_blk_ops
277{
278 int irq_level; /* new IRQ level to set */
279#if CONFIG_CORELOCK != SW_CORELOCK
280 union
281 {
282 int var_iv; /* int variable value to set */
283 uint8_t var_u8v; /* unsigned char value to set */
284 struct thread_entry *list_v; /* list pointer queue value to set */
285 };
286#endif
287 union
288 {
289#if CONFIG_CORELOCK != SW_CORELOCK
290 int *var_ip; /* pointer to int variable */
291 uint8_t *var_u8p; /* pointer to unsigned char variable */
292#endif
293 struct thread_queue *list_p; /* pointer to list variable */
294 };
295#if CONFIG_CORELOCK == SW_CORELOCK
296 struct corelock *cl_p; /* corelock to unlock */
297 struct thread_entry *thread; /* thread to unlock */
298#elif CONFIG_CORELOCK == CORELOCK_SWAP
299 unsigned char state; /* new thread state (performs unlock) */
300#endif /* CONFIG_CORELOCK */
301 unsigned char flags; /* TBOP_* flags */
302};
303#endif /* NUM_CORES > 1 */
304
305/* Information kept for each core
306 * Members are arranged for the same reason as in thread_entry
307 */
308struct core_entry
309{
310 /* "Active" lists - the core is constantly active on these; they are never
311 locked and interrupts do not access them */
312 struct thread_entry *running; /* threads that are running */
313 struct thread_entry *timeout; /* threads that are on a timeout before
314 running again */
315 /* "Shared" lists - cores interact in a synchronized manner - access
316 is locked between cores and interrupts */
317 struct thread_queue waking; /* intermediate locked list that
318 holds threads the other core should wake up
319 on the next task switch */
320 long next_tmo_check; /* soonest time to check tmo threads */
321#if NUM_CORES > 1
322 struct thread_blk_ops blk_ops; /* operations to perform when
323 blocking a thread */
324#else
325 #define STAY_IRQ_LEVEL (-1)
326 int irq_level; /* sets the irq level to irq_level */
327#endif /* NUM_CORES */
328#ifdef HAVE_PRIORITY_SCHEDULING
329 unsigned char highest_priority;
136#endif 330#endif
137 long last_tick;
138 int switch_to_irq_level;
139 #define STAY_IRQ_LEVEL -1
140}; 331};
141 332
142#ifdef HAVE_PRIORITY_SCHEDULING 333#ifdef HAVE_PRIORITY_SCHEDULING
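
A minimal sketch of the corelock primitives declared above on a SW_CORELOCK target such as the PP5020, where SWP(B) is unusable and Peterson's algorithm is used instead; the shared counter is illustrative.

    static struct corelock counter_cl;
    static int hits NOCACHEBSS_ATTR;     /* shared data must not be cached per-core */

    static void hits_init(void)
    {
        corelock_init(&counter_cl);
    }

    static void hits_bump(void)
    {
        corelock_lock(&counter_cl);      /* mutual exclusion between CPU and COP */
        hits++;
        corelock_unlock(&counter_cl);
    }
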
@@ -145,82 +336,210 @@ struct core_entry {
145#define IF_PRIO(...) 336#define IF_PRIO(...)
146#endif 337#endif
147 338
148/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
149 * Just use it for ARM, Coldfire and whatever else well...why not?
150 */
151
152/* Macros generate better code than an inline function is this case */ 339/* Macros generate better code than an inline function is this case */
153#if (defined (CPU_PP) || defined (CPU_ARM)) && CONFIG_CPU != PP5020 340#if (defined (CPU_PP) || defined (CPU_ARM))
154#define test_and_set(x_, v_) \ 341/* atomic */
155({ \ 342#ifdef SOFTWARE_CORELOCK
156 uint32_t old; \ 343#define test_and_set(a, v, cl) \
157 asm volatile ( \ 344 xchg8((a), (v), (cl))
158 "swpb %[old], %[v], [%[x]] \r\n" \ 345/* atomic */
159 : [old]"=r"(old) \ 346#define xchg8(a, v, cl) \
160 : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \ 347({ uint32_t o; \
161 ); \ 348 corelock_lock(cl); \
162 old; \ 349 o = *(uint8_t *)(a); \
163 }) 350 *(uint8_t *)(a) = (v); \
351 corelock_unlock(cl); \
352 o; })
353#define xchg32(a, v, cl) \
354({ uint32_t o; \
355 corelock_lock(cl); \
356 o = *(uint32_t *)(a); \
357 *(uint32_t *)(a) = (v); \
358 corelock_unlock(cl); \
359 o; })
360#define xchgptr(a, v, cl) \
361({ typeof (*(a)) o; \
362 corelock_lock(cl); \
363 o = *(a); \
364 *(a) = (v); \
365 corelock_unlock(cl); \
366 o; })
367#else
368/* atomic */
369#define test_and_set(a, v, ...) \
370 xchg8((a), (v))
371#define xchg8(a, v, ...) \
372({ uint32_t o; \
373 asm volatile( \
374 "swpb %0, %1, [%2]" \
375 : "=r"(o) \
376 : "r"(v), \
377 "r"((uint8_t*)(a))); \
378 o; })
379/* atomic */
380#define xchg32(a, v, ...) \
381({ uint32_t o; \
382 asm volatile( \
383 "swp %0, %1, [%2]" \
384 : "=r"(o) \
385 : "r"((uint32_t)(v)), \
386 "r"((uint32_t*)(a))); \
387 o; })
388/* atomic */
389#define xchgptr(a, v, ...) \
390({ typeof (*(a)) o; \
391 asm volatile( \
392 "swp %0, %1, [%2]" \
393 : "=r"(o) \
394 : "r"(v), "r"(a)); \
395 o; })
396#endif /* SOFTWARE_CORELOCK */
164#elif defined (CPU_COLDFIRE) 397#elif defined (CPU_COLDFIRE)
165#define test_and_set(x_, v_) \ 398/* atomic */
166({ \ 399/* one branch will be optimized away if v is a constant expression */
167 uint8_t old; \ 400#define test_and_set(a, v, ...) \
168 asm volatile ( \ 401({ uint32_t o = 0; \
169 "bset.l %[v], (%[x]) \r\n" \ 402 if (v) { \
170 "sne.b %[old] \r\n" \ 403 asm volatile ( \
171 : [old]"=d,d"(old) \ 404 "bset.b #0, (%0)" \
172 : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \ 405 : : "a"((uint8_t*)(a)) \
173 ); \ 406 : "cc"); \
174 old; \ 407 } else { \
175 }) 408 asm volatile ( \
409 "bclr.b #0, (%0)" \
410 : : "a"((uint8_t*)(a)) \
411 : "cc"); \
412 } \
413 asm volatile ("sne.b %0" \
414 : "+d"(o)); \
415 o; })
176#elif CONFIG_CPU == SH7034 416#elif CONFIG_CPU == SH7034
177#define test_and_set(x_, v_) \ 417/* atomic */
178({ \ 418#define test_and_set(a, v, ...) \
179 uint32_t old; \ 419({ uint32_t o; \
180 asm volatile ( \ 420 asm volatile ( \
181 "tas.b @%[x] \r\n" \ 421 "tas.b @%2 \n" \
182 "mov #-1, %[old] \r\n" \ 422 "mov #-1, %0 \n" \
183 "negc %[old], %[old] \r\n" \ 423 "negc %0, %0 \n" \
184 : [old]"=r"(old) \ 424 : "=r"(o) \
185 : [v]"M"((uint32_t)v_), /* Value of v_ must be 1 */ \ 425 : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
186 [x]"r"((uint8_t *)x_) \ 426 "r"((uint8_t *)(a))); \
187 ); \ 427 o; })
188 old; \ 428#endif /* CONFIG_CPU == */
189 }) 429
190#else 430/* defaults for no asm version */
191/* default for no asm version */ 431#ifndef test_and_set
192#define test_and_set(x_, v_) \ 432/* not atomic */
193({ \ 433#define test_and_set(a, v, ...) \
194 uint32_t old = *(uint32_t *)x_; \ 434({ uint32_t o = *(uint8_t *)(a); \
195 *(uint32_t *)x_ = v_; \ 435 *(uint8_t *)(a) = (v); \
196 old; \ 436 o; })
197 }) 437#endif /* test_and_set */
198#endif 438#ifndef xchg8
439/* not atomic */
440#define xchg8(a, v, ...) \
441({ uint32_t o = *(uint8_t *)(a); \
442 *(uint8_t *)(a) = (v); \
443 o; })
444#endif /* xchg8 */
445#ifndef xchg32
446/* not atomic */
447#define xchg32(a, v, ...) \
448({ uint32_t o = *(uint32_t *)(a); \
449 *(uint32_t *)(a) = (v); \
450 o; })
451#endif /* xchg32 */
452#ifndef xchgptr
453/* not atomic */
454#define xchgptr(a, v, ...) \
455({ typeof (*(a)) o = *(a); \
456 *(a) = (v); \
457 o; })
458#endif /* xchgptr */
199 459
460void core_idle(void);
461void core_wake(IF_COP_VOID(unsigned int core));
462
463#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
200struct thread_entry* 464struct thread_entry*
201 create_thread(void (*function)(void), void* stack, int stack_size, 465 create_thread(void (*function)(void), void* stack, int stack_size,
202 const char *name IF_PRIO(, int priority) 466 unsigned flags, const char *name
203 IF_COP(, unsigned int core, bool fallback)); 467 IF_PRIO(, int priority)
468 IF_COP(, unsigned int core));
204 469
205#ifdef HAVE_SCHEDULER_BOOSTCTRL 470#ifdef HAVE_SCHEDULER_BOOSTCTRL
206void trigger_cpu_boost(void); 471void trigger_cpu_boost(void);
207#else 472#else
208#define trigger_cpu_boost() 473#define trigger_cpu_boost()
209#endif 474#endif
210 475void thread_thaw(struct thread_entry *thread);
476void thread_wait(struct thread_entry *thread);
211void remove_thread(struct thread_entry *thread); 477void remove_thread(struct thread_entry *thread);
212void switch_thread(bool save_context, struct thread_entry **blocked_list); 478void switch_thread(struct thread_entry *old);
213void sleep_thread(int ticks); 479void sleep_thread(int ticks);
214void block_thread(struct thread_entry **thread); 480
215void block_thread_w_tmo(struct thread_entry **thread, int timeout); 481/**
216void set_irq_level_and_block_thread(struct thread_entry **thread, int level); 482 * Setup to allow using thread queues as locked or non-locked without speed
217void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list, 483 * sacrifices in both core locking types.
218 int timeout, int level); 484 *
219void wakeup_thread(struct thread_entry **thread); 485 * The blocking/waking function inline two different version of the real
220void wakeup_thread_irq_safe(struct thread_entry **thread); 486 * function into the stubs when a software or other separate core locking
487 * mechanism is employed.
488 *
489 * When a simple test-and-set or similar instruction is available, locking
490 * has no cost and so one version is used and the internal worker is called
491 * directly.
492 *
493 * CORELOCK_NONE is treated the same as when an atomic instruction can be
494 * used.
495 */
496
497/* Blocks the current thread on a thread queue */
498#if CONFIG_CORELOCK == SW_CORELOCK
499void block_thread(struct thread_queue *tq);
500void block_thread_no_listlock(struct thread_entry **list);
501#else
502void _block_thread(struct thread_queue *tq);
503static inline void block_thread(struct thread_queue *tq)
504 { _block_thread(tq); }
505static inline void block_thread_no_listlock(struct thread_entry **list)
506 { _block_thread((struct thread_queue *)list); }
507#endif /* CONFIG_CORELOCK */
508
509/* Blocks the current thread on a thread queue for a max amount of time
510 * There is no "_no_listlock" version because timeout blocks without sync on
511 * the blocking queues is not permitted since either core could access the
512 * list at any time to do an implicit wake. In other words, objects with
513 * timeout support require lockable queues. */
514void block_thread_w_tmo(struct thread_queue *tq, int timeout);
515
516/* Wakes up the thread at the head of the queue */
517#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
518#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
519#if CONFIG_CORELOCK == SW_CORELOCK
520struct thread_entry * wakeup_thread(struct thread_queue *tq);
521struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
522#else
523struct thread_entry * _wakeup_thread(struct thread_queue *list);
524static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
525 { return _wakeup_thread(tq); }
526static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
527 { return _wakeup_thread((struct thread_queue *)list); }
528#endif /* CONFIG_CORELOCK */
529
530/* Initialize a thread_queue object. */
531static inline void thread_queue_init(struct thread_queue *tq)
532 { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
533/* A convenience function for waking an entire queue of threads. */
534static inline void thread_queue_wake(struct thread_queue *tq)
535 { while (wakeup_thread(tq) != NULL); }
536/* The no-listlock version of thread_queue_wake() */
537static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
538 { while (wakeup_thread_no_listlock(list) != NULL); }
539
221#ifdef HAVE_PRIORITY_SCHEDULING 540#ifdef HAVE_PRIORITY_SCHEDULING
222int thread_set_priority(struct thread_entry *thread, int priority); 541int thread_set_priority(struct thread_entry *thread, int priority);
223int thread_get_priority(struct thread_entry *thread); 542int thread_get_priority(struct thread_entry *thread);
224/* Yield that guarantees thread execution once per round regardless of 543/* Yield that guarantees thread execution once per round regardless of
225 thread's scheduler priority - basically a transient realtime boost 544 thread's scheduler priority - basically a transient realtime boost
226 without altering the scheduler's thread precedence. */ 545 without altering the scheduler's thread precedence. */
@@ -228,17 +547,20 @@ void priority_yield(void);
228#else 547#else
229#define priority_yield yield 548#define priority_yield yield
230#endif /* HAVE_PRIORITY_SCHEDULING */ 549#endif /* HAVE_PRIORITY_SCHEDULING */
550#if NUM_CORES > 1
551unsigned int switch_core(unsigned int new_core);
552#endif
231struct thread_entry * thread_get_current(void); 553struct thread_entry * thread_get_current(void);
232void init_threads(void); 554void init_threads(void);
233int thread_stack_usage(const struct thread_entry *thread); 555int thread_stack_usage(const struct thread_entry *thread);
234#if NUM_CORES > 1 556#if NUM_CORES > 1
235int idle_stack_usage(unsigned int core); 557int idle_stack_usage(unsigned int core);
236#endif 558#endif
237int thread_get_status(const struct thread_entry *thread); 559unsigned thread_get_status(const struct thread_entry *thread);
238void thread_get_name(char *buffer, int size, 560void thread_get_name(char *buffer, int size,
239 struct thread_entry *thread); 561 struct thread_entry *thread);
240#ifdef RB_PROFILE 562#ifdef RB_PROFILE
241void profile_thread(void); 563void profile_thread(void);
242#endif 564#endif
243 565
244#endif 566#endif /* THREAD_H */
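
A hedged sketch of the reworked create_thread() signature together with the new freeze/thaw pattern; the stack size, thread name and priority are illustrative.

    #include "kernel.h"
    #include "thread.h"

    static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void demo_thread(void)
    {
        while (1)
            sleep(HZ);
    }

    static void demo_start(void)
    {
        struct thread_entry *t =
            create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                          CREATE_THREAD_FROZEN, "demo"
                          IF_PRIO(, PRIORITY_BACKGROUND)
                          IF_COP(, CPU));

        /* ... finish setting up anything the new thread depends on ... */

        thread_thaw(t);   /* now let the frozen thread run */
    }
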