path: root/firmware/export/kernel.h
author      Michael Sevakis <jethead71@rockbox.org>  2007-10-16 01:25:17 +0000
committer   Michael Sevakis <jethead71@rockbox.org>  2007-10-16 01:25:17 +0000
commit      a9b2fb5ee3114fe835f6515b6aeae7454f66d821 (patch)
tree        fc4e96d0c1f215565918406c8827b16b806c1345 /firmware/export/kernel.h
parent      a3fbbc9fa7e12fd3fce122bbd235dc362050e024 (diff)
download    rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.tar.gz
            rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.zip
Finally, full multicore support for PortalPlayer 502x targets, with an eye towards the possibility of other types. All SVN targets get the low-lag code to speed up blocking operations. Most files are modified here simply due to the rename needed to support a real event object and a parameter change to create_thread. Add some use of the new features but just sit on things for a bit and leave full integration for later. Work will continue on addressing size on sensitive targets and simplifying things where possible. Any PP target having problems with SWP can easily be switched to software corelocks with one #define change in config.h, though only PP5020 has shown an issue and it seems to work without any difficulties.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
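
As a rough illustration of the rename the message describes: the message container formerly declared as struct event is now struct queue_event, so an event loop only changes its local declaration. A minimal sketch follows, assuming a simple consumer thread; my_queue and my_thread_loop are illustrative names, not identifiers from this commit.

/* Sketch only: my_queue and my_thread_loop are illustrative, not from this commit. */
#include "kernel.h"

static struct event_queue my_queue;

static void my_thread_loop(void)
{
    struct queue_event ev;          /* was "struct event ev;" before the rename */

    queue_init(&my_queue, true);    /* register the queue so broadcasts reach it */

    while (1)
    {
        queue_wait(&my_queue, &ev); /* block until a message is posted */
        /* dispatch on ev.id / ev.data here */
    }
}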
Diffstat (limited to 'firmware/export/kernel.h')
-rw-r--r--  firmware/export/kernel.h  103
1 file changed, 86 insertions(+), 17 deletions(-)
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 3d70e49a4c..a72e004b33 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -23,6 +23,8 @@
 #include <inttypes.h>
 #include "config.h"
 
+#include "thread.h"
+
 /* wrap-safe macros for tick comparison */
 #define TIME_AFTER(a,b)  ((long)(b) - (long)(a) < 0)
 #define TIME_BEFORE(a,b) TIME_AFTER(b,a)
@@ -31,6 +33,7 @@
 
 #define MAX_NUM_TICK_TASKS 8
 
+#define MAX_NUM_QUEUES 32
 #define QUEUE_LENGTH 16 /* MUST be a power of 2 */
 #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
 
@@ -72,7 +75,7 @@
 #define SYS_SCREENDUMP         MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
 #define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
 
-struct event
+struct queue_event
 {
     long id;
     intptr_t data;
@@ -91,20 +94,66 @@ struct queue_sender_list
 
 struct event_queue
 {
-    struct event events[QUEUE_LENGTH];
-    struct thread_entry *thread;
-    unsigned int read;
-    unsigned int write;
+    struct thread_queue queue;               /* Waiter list */
+    struct queue_event events[QUEUE_LENGTH]; /* list of events */
+    unsigned int read;                       /* head of queue */
+    unsigned int write;                      /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-    struct queue_sender_list *send;
+    struct queue_sender_list *send;          /* list of threads waiting for
+                                                reply to an event */
+#endif
+#if NUM_CORES > 1
+    struct corelock cl;                      /* inter-core sync */
 #endif
 };
 
 struct mutex
 {
-    uint32_t locked;
-    struct thread_entry *thread;
+    struct thread_entry *queue;              /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                      /* inter-core sync */
+#endif
+    struct thread_entry *thread;             /* thread that owns lock */
+    int count;                               /* lock owner recursion count */
+    unsigned char locked;                    /* locked semaphore */
+};
+
+struct spinlock
+{
+#if NUM_CORES > 1
+    struct corelock cl;                      /* inter-core sync */
+#endif
+    struct thread_entry *thread;             /* lock owner */
+    int count;                               /* lock owner recursion count */
+    unsigned char locked;                    /* is locked if nonzero */
+#if NUM_CORES > 1
+    unsigned char task_switch;               /* can task switch? */
+#endif
+};
+
+#ifdef HAVE_SEMAPHORE_OBJECTS
+struct semaphore
+{
+    struct thread_entry *queue;              /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                      /* inter-core sync */
+#endif
+    int count;                               /* # of waits remaining before unsignaled */
+    int max;                                 /* maximum # of waits to remain signaled */
+};
+#endif
+
+#ifdef HAVE_EVENT_OBJECTS
+struct event
+{
+    struct thread_entry *queues[2];          /* waiters for each state */
+#if CONFIG_CORELOCK == SW_CORELOCK
+    struct corelock cl;                      /* inter-core sync */
+#endif
+    unsigned char automatic;                 /* event performs auto-reset */
+    unsigned char state;                     /* state: 1 = signaled */
 };
+#endif
 
 /* global tick variable */
 #if defined(CPU_PP) && defined(BOOTLOADER)
@@ -127,6 +176,7 @@ extern void yield(void);
 extern void sleep(int ticks);
 int tick_add_task(void (*f)(void));
 int tick_remove_task(void (*f)(void));
+extern void tick_start(unsigned int interval_in_ms);
 
 struct timeout;
 
@@ -150,10 +200,17 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                       int ticks, intptr_t data);
 void timeout_cancel(struct timeout *tmo);
 
+#define STATE_NONSIGNALED 0
+#define STATE_SIGNALED    1
+
+#define WAIT_TIMEDOUT     (-1)
+#define WAIT_SUCCEEDED    1
+
 extern void queue_init(struct event_queue *q, bool register_queue);
 extern void queue_delete(struct event_queue *q);
-extern void queue_wait(struct event_queue *q, struct event *ev);
-extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
+extern void queue_wait(struct event_queue *q, struct queue_event *ev);
+extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
+                             int ticks);
 extern void queue_post(struct event_queue *q, long id, intptr_t data);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
 extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
@@ -168,14 +225,26 @@ extern int queue_count(const struct event_queue *q);
 extern int queue_broadcast(long id, intptr_t data);
 
 extern void mutex_init(struct mutex *m);
-static inline void spinlock_init(struct mutex *m)
-{ mutex_init(m); } /* Same thing for now */
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
-extern void spinlock_lock(struct mutex *m);
-extern void spinlock_unlock(struct mutex *m);
-extern void tick_start(unsigned int interval_in_ms);
-
+#define SPINLOCK_TASK_SWITCH    0x10
+#define SPINLOCK_NO_TASK_SWITCH 0x00
+extern void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags));
+extern void spinlock_lock(struct spinlock *l);
+extern void spinlock_unlock(struct spinlock *l);
+extern int spinlock_lock_w_tmo(struct spinlock *l, int ticks);
+#ifdef HAVE_SEMAPHORE_OBJECTS
+extern void semaphore_init(struct semaphore *s, int max, int start);
+extern void semaphore_wait(struct semaphore *s);
+extern void semaphore_release(struct semaphore *s);
+#endif /* HAVE_SEMAPHORE_OBJECTS */
+#ifdef HAVE_EVENT_OBJECTS
+#define EVENT_AUTOMATIC 0x10
+#define EVENT_MANUAL    0x00
+extern void event_init(struct event *e, unsigned int flags);
+extern void event_wait(struct event *e, unsigned int for_state);
+extern void event_set_state(struct event *e, unsigned int state);
+#endif /* HAVE_EVENT_OBJECTS */
 #define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
 
-#endif
+#endif /* _KERNEL_H_ */
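
The header now also declares semaphore and event objects (behind HAVE_SEMAPHORE_OBJECTS and HAVE_EVENT_OBJECTS) alongside the reworked spinlock. Below is a minimal sketch of how the new calls could fit together, assuming both defines are enabled for the target; sem, data_ready, producer and consumer are illustrative names, and the auto-reset behaviour is inferred from the struct comments in the diff.

/* Sketch only: assumes HAVE_SEMAPHORE_OBJECTS and HAVE_EVENT_OBJECTS are
 * defined for the target; the names below are not from this commit. */
#include "kernel.h"

static struct semaphore sem;        /* guards a shared buffer */
static struct event data_ready;     /* signals that new data is available */

static void sync_init(void)
{
    semaphore_init(&sem, 1, 1);                 /* max count 1, initially available */
    event_init(&data_ready, EVENT_AUTOMATIC);   /* presumably auto-resets after waking a waiter */
}

static void producer(void)
{
    semaphore_wait(&sem);                       /* take the semaphore, block if held */
    /* ... fill the shared buffer ... */
    semaphore_release(&sem);
    event_set_state(&data_ready, STATE_SIGNALED);   /* wake the consumer */
}

static void consumer(void)
{
    event_wait(&data_ready, STATE_SIGNALED);    /* block until the producer signals */
    semaphore_wait(&sem);
    /* ... drain the shared buffer ... */
    semaphore_release(&sem);
}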