author     Michael Sevakis <jethead71@rockbox.org>  2008-03-25 02:34:12 +0000
committer  Michael Sevakis <jethead71@rockbox.org>  2008-03-25 02:34:12 +0000
commit     27cf67733936abd75fcb1f8da765977cd75906ee (patch)
tree       f894211a8a0c77b402dd3250b2bee2d17dcfe13f /firmware/export/kernel.h
parent     bc2f8fd8f38a3e010cd67bbac358f6e9991153c6 (diff)
download   rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.tar.gz
           rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.zip
Add a complete priority inheritance implementation to the scheduler (all mutex ownership and queue_send calls are inheritable). Priorities are differential, so dispatch depends on the runnable range of priorities; codec priority can therefore be raised in small steps (pcmbuf updated to enable this). Simplify the kernel functions to ease implementation and use the same kernel.c for both sim and target (I'm tired of maintaining two ;_).

1) Not sure whether a minor audio break will still occur at the first buffering on large-sector disks (the main mutex speed issue was genuinely resolved earlier). At this point it's best dealt with at the buffering level; it seems a larger file chunk could be used again.

2) 64-bit sims may have some minor (finicky) issues, but rolling back the code of concern there is a 5-minute job.

All kernel objects become incompatible, so a full rebuild and update is needed.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16791 a1c6a512-1295-4272-9138-f99709370657
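
For reference, the core idea behind the change: with priority inheritance, a thread that owns a lock (or holds a synchronously sent message) is temporarily boosted to the priority of the highest-priority waiter blocked on it, so middle-priority threads cannot starve the owner and, transitively, the waiter. The following is a minimal sketch of that mechanism under hypothetical names (prio_thread, prio_blocker, boost_owner) -- not the actual Rockbox scheduler code. As in Rockbox, a lower numeric value means a higher priority.

/* Minimal priority-inheritance sketch; hypothetical types and names. */
struct prio_thread
{
    int base_priority;    /* priority assigned at creation */
    int priority;         /* effective priority, possibly boosted */
};

struct prio_blocker
{
    struct prio_thread *owner;   /* thread currently holding the object */
};

/* Called when 'waiter' blocks on 'b': donate the waiter's priority
   to the owner if the waiter outranks it (lower value = higher). */
static void boost_owner(struct prio_blocker *b, struct prio_thread *waiter)
{
    if (b->owner != NULL && waiter->priority < b->owner->priority)
        b->owner->priority = waiter->priority;
}

/* Called when the owner releases the object: drop back to the base
   priority. A real implementation re-evaluates any remaining waiters
   rather than resetting unconditionally. */
static void unboost_owner(struct prio_thread *owner)
{
    owner->priority = owner->base_priority;
}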
Diffstat (limited to 'firmware/export/kernel.h')
-rw-r--r--   firmware/export/kernel.h   94
1 file changed, 63 insertions(+), 31 deletions(-)
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 70a2f98d59..78403c8b7d 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -76,6 +76,8 @@
 #define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
 #define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
 
+#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
+
 struct queue_event
 {
     long id;
@@ -87,68 +89,92 @@ struct queue_sender_list
 {
     /* If non-NULL, there is a thread waiting for the corresponding event */
     /* Must be statically allocated to put in non-cached ram. */
-    struct thread_entry *senders[QUEUE_LENGTH];
+    struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
+    struct thread_entry *list;                  /* list of senders in map */
     /* Send info for last message dequeued or NULL if replied or not sent */
     struct thread_entry *curr_sender;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker blocker;
+#endif
 };
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define QUEUE_GET_THREAD(q) \
+    (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
+#else
+/* Queue without priority enabled have no owner provision _at this time_ */
+#define QUEUE_GET_THREAD(q) \
+    (NULL)
+#endif
+
 struct event_queue
 {
-    struct thread_queue queue;              /* Waiter list */
+    struct thread_entry *queue;             /* waiter list */
     struct queue_event events[QUEUE_LENGTH]; /* list of events */
     unsigned int read;                      /* head of queue */
     unsigned int write;                     /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     struct queue_sender_list *send;         /* list of threads waiting for
                                                reply to an event */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker *blocker_p;              /* priority inheritance info
+                                               for sync message senders */
 #endif
-#if NUM_CORES > 1
-    struct corelock cl;                     /* inter-core sync */
 #endif
+    IF_COP( struct corelock cl; )           /* multiprocessor sync */
 };
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define MUTEX_SET_THREAD(m, t) ((m)->blocker.thread = (t))
+#define MUTEX_GET_THREAD(m)    ((m)->blocker.thread)
+#else
+#define MUTEX_SET_THREAD(m, t) ((m)->thread = (t))
+#define MUTEX_GET_THREAD(m)    ((m)->thread)
+#endif
+
 struct mutex
 {
-    struct thread_entry *queue;             /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                     /* inter-core sync */
+    struct thread_entry *queue;             /* waiter list */
+    int count;                              /* lock owner recursion count */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker blocker;                 /* priority inheritance info
+                                               for waiters */
+    bool no_preempt;                        /* don't allow higher-priority thread
+                                               to be scheduled even if woken */
+#else
+    struct thread_entry *thread;
 #endif
-    struct thread_entry *thread;            /* thread that owns lock */
-    int count;                              /* lock owner recursion count */
-    unsigned char locked;                   /* locked semaphore */
+    IF_COP( struct corelock cl; )           /* multiprocessor sync */
+    unsigned char locked;                   /* locked semaphore */
 };
 
 #if NUM_CORES > 1
 struct spinlock
 {
-    struct corelock cl;                     /* inter-core sync */
-    struct thread_entry *thread;            /* lock owner */
-    int count;                              /* lock owner recursion count */
+    struct thread_entry *thread;            /* lock owner */
+    int count;                              /* lock owner recursion count */
+    struct corelock cl;                     /* multiprocessor sync */
 };
 #endif
 
 #ifdef HAVE_SEMAPHORE_OBJECTS
 struct semaphore
 {
     struct thread_entry *queue;             /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                     /* inter-core sync */
-#endif
-    int count;                              /* # of waits remaining before unsignaled */
-    int max;                                /* maximum # of waits to remain signaled */
+    int count;                              /* # of waits remaining before unsignaled */
+    int max;                                /* maximum # of waits to remain signaled */
+    IF_COP( struct corelock cl; )           /* multiprocessor sync */
 };
 #endif
 
 #ifdef HAVE_EVENT_OBJECTS
 struct event
 {
     struct thread_entry *queues[2];         /* waiters for each state */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                     /* inter-core sync */
-#endif
-    unsigned char automatic;                /* event performs auto-reset */
-    unsigned char state;                    /* state: 1 = signaled */
+    unsigned char automatic;                /* event performs auto-reset */
+    unsigned char state;                    /* state: 1 = signaled */
+    IF_COP( struct corelock cl; )           /* multiprocessor sync */
 };
 #endif
 
@@ -208,7 +234,9 @@ extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
                             int ticks);
 extern void queue_post(struct event_queue *q, long id, intptr_t data);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
+extern void queue_enable_queue_send(struct event_queue *q,
+                                    struct queue_sender_list *send,
+                                    struct thread_entry *owner);
 extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
 extern void queue_reply(struct event_queue *q, intptr_t retval);
 extern bool queue_in_queue_send(struct event_queue *q);
@@ -223,6 +251,11 @@ extern int queue_broadcast(long id, intptr_t data);
 extern void mutex_init(struct mutex *m);
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
+#ifdef HAVE_PRIORITY_SCHEDULING
+/* Temporary function to disable mutex preempting a thread on unlock */
+static inline void mutex_set_preempt(struct mutex *m, bool preempt)
+    { m->no_preempt = !preempt; }
+#endif
 #if NUM_CORES > 1
 extern void spinlock_init(struct spinlock *l);
 extern void spinlock_lock(struct spinlock *l);
@@ -240,6 +273,5 @@ extern void event_init(struct event *e, unsigned int flags);
 extern void event_wait(struct event *e, unsigned int for_state);
 extern void event_set_state(struct event *e, unsigned int state);
 #endif /* HAVE_EVENT_OBJECTS */
-#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
 
 #endif /* _KERNEL_H_ */
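
Usage note: after this change, queue_enable_queue_send() takes the owning thread as a third argument so that queue_send() callers can donate their priority to it. Below is a sketch of the send/reply pattern under the new API. The names example_q, example_q_send, EV_PING, and example_init() are placeholders; the sketch assumes the queue_init()/queue_wait() declarations found elsewhere in this header, and owner_entry would typically be the struct thread_entry * returned by create_thread().

#include "kernel.h"

#define EV_PING 1                        /* placeholder message id */

static struct event_queue example_q;
static struct queue_sender_list example_q_send; /* statically allocated, per
                                                   the struct's comment above */

/* One-time setup: register the queue and name its owning thread so
   that synchronous senders can apply priority inheritance to it. */
static void example_init(struct thread_entry *owner_entry)
{
    queue_init(&example_q, true);
    queue_enable_queue_send(&example_q, &example_q_send, owner_entry);
}

/* Owner thread loop: drains the queue and replies to sync senders. */
static void example_thread(void)
{
    struct queue_event ev;

    while (1)
    {
        queue_wait(&example_q, &ev);

        if (ev.id == EV_PING)
            queue_reply(&example_q, ev.data + 1); /* unblocks the sender */
    }
}

/* Client side: blocks in queue_send() until example_thread replies.
   While it waits, the queue's owner inherits this thread's priority. */
static intptr_t ping(intptr_t data)
{
    return queue_send(&example_q, EV_PING, data);
}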