path: root/firmware/kernel.c
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r-- firmware/kernel.c | 731
1 file changed, 614 insertions(+), 117 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 1b6e9f933b..4e56c2919a 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -28,15 +28,37 @@
28#include "avic-imx31.h" 28#include "avic-imx31.h"
29#endif 29#endif
30 30
31/* Make this nonzero to enable more elaborate checks on objects */
32#ifdef DEBUG
33#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
34#else
35#define KERNEL_OBJECT_CHECKS 0
36#endif
37
38#if KERNEL_OBJECT_CHECKS
39#define KERNEL_ASSERT(exp, msg...) \
40 ({ if (!({ exp; })) panicf(msg); })
41#else
42#define KERNEL_ASSERT(exp, msg...) ({})
43#endif
44
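As a quick illustration (the condition and message here are made up, not part of this change), the macro is used like panicf() guarded by an expression, and compiles down to ({}) when KERNEL_OBJECT_CHECKS is 0:

/* hypothetical invariant check on a queue pointer */
KERNEL_ASSERT(q != NULL, "queue_post->NULL queue: %08X", (int)q);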
31#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER) 45#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
32volatile long current_tick NOCACHEDATA_ATTR = 0; 46volatile long current_tick NOCACHEDATA_ATTR = 0;
33#endif 47#endif
34 48
35void (*tick_funcs[MAX_NUM_TICK_TASKS])(void); 49void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
36 50
51extern struct core_entry cores[NUM_CORES];
52
37/* This array holds all queues that are initiated. It is used for broadcast. */ 53/* This array holds all queues that are initiated. It is used for broadcast. */
38static struct event_queue *all_queues[32] NOCACHEBSS_ATTR; 54static struct
39static int num_queues NOCACHEBSS_ATTR; 55{
56 int count;
57 struct event_queue *queues[MAX_NUM_QUEUES];
58#if NUM_CORES > 1
59 struct corelock cl;
60#endif
61} all_queues NOCACHEBSS_ATTR;
40 62
41/**************************************************************************** 63/****************************************************************************
42 * Standard kernel stuff 64 * Standard kernel stuff
@@ -52,8 +74,8 @@ void kernel_init(void)
52 if (CURRENT_CORE == CPU) 74 if (CURRENT_CORE == CPU)
53 { 75 {
54 memset(tick_funcs, 0, sizeof(tick_funcs)); 76 memset(tick_funcs, 0, sizeof(tick_funcs));
55 num_queues = 0; 77 memset(&all_queues, 0, sizeof(all_queues));
56 memset(all_queues, 0, sizeof(all_queues)); 78 corelock_init(&all_queues.cl);
57 tick_start(1000/HZ); 79 tick_start(1000/HZ);
58 } 80 }
59} 81}
@@ -77,7 +99,7 @@ void sleep(int ticks)
77#elif defined(CPU_PP) && defined(BOOTLOADER) 99#elif defined(CPU_PP) && defined(BOOTLOADER)
78 unsigned stop = USEC_TIMER + ticks * (1000000/HZ); 100 unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
79 while (TIME_BEFORE(USEC_TIMER, stop)) 101 while (TIME_BEFORE(USEC_TIMER, stop))
80 switch_thread(true,NULL); 102 switch_thread(NULL);
81#else 103#else
82 sleep_thread(ticks); 104 sleep_thread(ticks);
83#endif 105#endif
@@ -88,7 +110,7 @@ void yield(void)
88#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER)) 110#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
89 /* Some targets don't like yielding in the bootloader */ 111 /* Some targets don't like yielding in the bootloader */
90#else 112#else
91 switch_thread(true, NULL); 113 switch_thread(NULL);
92#endif 114#endif
93} 115}
94 116
@@ -104,7 +126,7 @@ static void queue_fetch_sender(struct queue_sender_list *send,
104{ 126{
105 struct thread_entry **spp = &send->senders[i]; 127 struct thread_entry **spp = &send->senders[i];
106 128
107 if (*spp) 129 if(*spp)
108 { 130 {
109 send->curr_sender = *spp; 131 send->curr_sender = *spp;
110 *spp = NULL; 132 *spp = NULL;
@@ -124,18 +146,16 @@ static void queue_release_sender(struct thread_entry **sender,
124 intptr_t retval) 146 intptr_t retval)
125{ 147{
126 (*sender)->retval = retval; 148 (*sender)->retval = retval;
127 wakeup_thread_irq_safe(sender); 149 wakeup_thread_no_listlock(sender);
128#if 0
129 /* This should _never_ happen - there must never be multiple 150 /* This should _never_ happen - there must never be multiple
130 threads in this list and it is a corrupt state */ 151 threads in this list and it is a corrupt state */
131 if (*sender != NULL) 152 KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
132 panicf("Queue: send slot ovf");
133#endif
134} 153}
135 154
136/* Releases any waiting threads that are queued with queue_send - 155/* Releases any waiting threads that are queued with queue_send -
137 * reply with 0. 156 * reply with 0.
138 * Disable IRQs before calling since it uses queue_release_sender. 157 * Disable IRQs and lock before calling since it uses
158 * queue_release_sender.
139 */ 159 */
140static void queue_release_all_senders(struct event_queue *q) 160static void queue_release_all_senders(struct event_queue *q)
141{ 161{
@@ -156,79 +176,114 @@ static void queue_release_all_senders(struct event_queue *q)
156} 176}
157 177
158/* Enables queue_send on the specified queue - caller allocates the extra 178/* Enables queue_send on the specified queue - caller allocates the extra
159 data structure */ 179 data structure. Only queues which are taken to be owned by a thread should
180 enable this. Public waiting is not permitted. */
160void queue_enable_queue_send(struct event_queue *q, 181void queue_enable_queue_send(struct event_queue *q,
161 struct queue_sender_list *send) 182 struct queue_sender_list *send)
162{ 183{
163 q->send = send; 184 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
164 memset(send, 0, sizeof(struct queue_sender_list)); 185 corelock_lock(&q->cl);
186
187 q->send = NULL;
188 if(send != NULL)
189 {
190 memset(send, 0, sizeof(*send));
191 q->send = send;
192 }
193
194 corelock_unlock(&q->cl);
195 set_irq_level(oldlevel);
165} 196}
166#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ 197#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
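A sketch of the intended call pattern under these new semantics (all names below are hypothetical, not from this change): the owning thread provides the queue_sender_list storage itself and enables synchronous sends once before it starts waiting; passing NULL instead disables sending again.

#include "kernel.h"    /* assumed to declare the queue API */

static struct event_queue example_q;            /* hypothetical queue */
static struct queue_sender_list example_q_send; /* caller-allocated storage */

static void example_queue_setup(void)
{
    queue_init(&example_q, true);  /* register it so broadcasts reach it */
    /* only the thread that owns and waits on example_q should enable this */
    queue_enable_queue_send(&example_q, &example_q_send);
}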
167 198
168 199/* Queue must not be available for use during this call */
169void queue_init(struct event_queue *q, bool register_queue) 200void queue_init(struct event_queue *q, bool register_queue)
170{ 201{
202 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
203
204 if(register_queue)
205 {
206 corelock_lock(&all_queues.cl);
207 }
208
209 corelock_init(&q->cl);
210 thread_queue_init(&q->queue);
171 q->read = 0; 211 q->read = 0;
172 q->write = 0; 212 q->write = 0;
173 q->thread = NULL;
174#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 213#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
175 q->send = NULL; /* No message sending by default */ 214 q->send = NULL; /* No message sending by default */
176#endif 215#endif
177 216
178 if(register_queue) 217 if(register_queue)
179 { 218 {
219 if(all_queues.count >= MAX_NUM_QUEUES)
220 {
221 panicf("queue_init->out of queues");
222 }
180 /* Add it to the all_queues array */ 223 /* Add it to the all_queues array */
181 all_queues[num_queues++] = q; 224 all_queues.queues[all_queues.count++] = q;
225 corelock_unlock(&all_queues.cl);
182 } 226 }
227
228 set_irq_level(oldlevel);
183} 229}
184 230
231/* Queue must not be available for use during this call */
185void queue_delete(struct event_queue *q) 232void queue_delete(struct event_queue *q)
186{ 233{
234 int oldlevel;
187 int i; 235 int i;
188 bool found = false;
189
190 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
191 236
192 /* Release theads waiting on queue */ 237 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
193 wakeup_thread(&q->thread); 238 corelock_lock(&all_queues.cl);
239 corelock_lock(&q->cl);
194 240
195#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
196 /* Release waiting threads and reply to any dequeued message
197 waiting for one. */
198 queue_release_all_senders(q);
199 queue_reply(q, 0);
200#endif
201
202 /* Find the queue to be deleted */ 241 /* Find the queue to be deleted */
203 for(i = 0;i < num_queues;i++) 242 for(i = 0;i < all_queues.count;i++)
204 { 243 {
205 if(all_queues[i] == q) 244 if(all_queues.queues[i] == q)
206 { 245 {
207 found = true; 246 /* Move the following queues up in the list */
247 all_queues.count--;
248
249 for(;i < all_queues.count;i++)
250 {
251 all_queues.queues[i] = all_queues.queues[i+1];
252 }
253
208 break; 254 break;
209 } 255 }
210 } 256 }
211 257
212 if(found) 258 corelock_unlock(&all_queues.cl);
213 { 259
214 /* Move the following queues up in the list */ 260 /* Release threads waiting on queue head */
215 for(;i < num_queues-1;i++) 261 thread_queue_wake(&q->queue);
216 { 262
217 all_queues[i] = all_queues[i+1]; 263#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
218 } 264 /* Release waiting threads for reply and reply to any dequeued
219 265 message waiting for one. */
220 num_queues--; 266 queue_release_all_senders(q);
221 } 267 queue_reply(q, 0);
222 268#endif
269
270 q->read = 0;
271 q->write = 0;
272
273 corelock_unlock(&q->cl);
223 set_irq_level(oldlevel); 274 set_irq_level(oldlevel);
224} 275}
225 276
226void queue_wait(struct event_queue *q, struct event *ev) 277/* NOTE: multiple threads waiting on a queue head cannot have a well-
278 defined release order if timeouts are used. If multiple threads must
279 access the queue head, use a dispatcher or queue_wait only. */
280void queue_wait(struct event_queue *q, struct queue_event *ev)
227{ 281{
228 int oldlevel; 282 int oldlevel;
229 unsigned int rd; 283 unsigned int rd;
230 284
231 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 285 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
286 corelock_lock(&q->cl);
232 287
233#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 288#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
234 if(q->send && q->send->curr_sender) 289 if(q->send && q->send->curr_sender)
@@ -240,8 +295,28 @@ void queue_wait(struct event_queue *q, struct event *ev)
240 295
241 if (q->read == q->write) 296 if (q->read == q->write)
242 { 297 {
243 set_irq_level_and_block_thread(&q->thread, oldlevel); 298 do
244 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 299 {
300#if CONFIG_CORELOCK == CORELOCK_NONE
301 cores[CURRENT_CORE].irq_level = oldlevel;
302#elif CONFIG_CORELOCK == SW_CORELOCK
303 const unsigned int core = CURRENT_CORE;
304 cores[core].blk_ops.irq_level = oldlevel;
305 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
306 cores[core].blk_ops.cl_p = &q->cl;
307#elif CONFIG_CORELOCK == CORELOCK_SWAP
308 const unsigned int core = CURRENT_CORE;
309 cores[core].blk_ops.irq_level = oldlevel;
310 cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
311 cores[core].blk_ops.var_u8p = &q->cl.locked;
312 cores[core].blk_ops.var_u8v = 0;
313#endif /* CONFIG_CORELOCK */
314 block_thread(&q->queue);
315 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
316 corelock_lock(&q->cl);
317 }
318 /* A message that woke us could now be gone */
319 while (q->read == q->write);
245 } 320 }
246 321
247 rd = q->read++ & QUEUE_LENGTH_MASK; 322 rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -254,13 +329,17 @@ void queue_wait(struct event_queue *q, struct event *ev)
254 queue_fetch_sender(q->send, rd); 329 queue_fetch_sender(q->send, rd);
255 } 330 }
256#endif 331#endif
257 332
333 corelock_unlock(&q->cl);
258 set_irq_level(oldlevel); 334 set_irq_level(oldlevel);
259} 335}
260 336
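For reference, a minimal single-waiter consumer loop over the renamed struct queue_event, using the hypothetical example_q from the sketch above (message id invented):

static void example_owner_thread(void)
{
    struct queue_event ev;

    while(1)
    {
        queue_wait(&example_q, &ev);   /* blocks until a message arrives */

        switch(ev.id)
        {
            case 1:   /* hypothetical message id */
                /* ev.data carries the intptr_t payload from the poster */
                break;
            default:
                break;
        }
    }
}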
261void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks) 337void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
262{ 338{
263 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 339 int oldlevel;
340
341 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
342 corelock_lock(&q->cl);
264 343
265#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 344#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
266 if (q->send && q->send->curr_sender) 345 if (q->send && q->send->curr_sender)
@@ -269,13 +348,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
269 queue_release_sender(&q->send->curr_sender, 0); 348 queue_release_sender(&q->send->curr_sender, 0);
270 } 349 }
271#endif 350#endif
272 351
273 if (q->read == q->write && ticks > 0) 352 if (q->read == q->write && ticks > 0)
274 { 353 {
275 set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel); 354#if CONFIG_CORELOCK == CORELOCK_NONE
355 cores[CURRENT_CORE].irq_level = oldlevel;
356#elif CONFIG_CORELOCK == SW_CORELOCK
357 const unsigned int core = CURRENT_CORE;
358 cores[core].blk_ops.irq_level = oldlevel;
359 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
360 cores[core].blk_ops.cl_p = &q->cl;
361#elif CONFIG_CORELOCK == CORELOCK_SWAP
362 const unsigned int core = CURRENT_CORE;
363 cores[core].blk_ops.irq_level = oldlevel;
364 cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
365 cores[core].blk_ops.var_u8p = &q->cl.locked;
366 cores[core].blk_ops.var_u8v = 0;
367#endif
368 block_thread_w_tmo(&q->queue, ticks);
276 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 369 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
370 corelock_lock(&q->cl);
277 } 371 }
278 372
373 /* no worry about a removed message here - status is checked inside
374 locks - perhaps verify if timeout or false alarm */
279 if (q->read != q->write) 375 if (q->read != q->write)
280 { 376 {
281 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK; 377 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -293,15 +389,19 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
293 { 389 {
294 ev->id = SYS_TIMEOUT; 390 ev->id = SYS_TIMEOUT;
295 } 391 }
296 392
393 corelock_unlock(&q->cl);
297 set_irq_level(oldlevel); 394 set_irq_level(oldlevel);
298} 395}
299 396
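The timeout variant, sketched with the same hypothetical queue: ticks are in 1/HZ units, and ev.id comes back as SYS_TIMEOUT when nothing arrived in time.

struct queue_event ev;

queue_wait_w_tmo(&example_q, &ev, HZ/2);   /* wait at most half a second */

if(ev.id == SYS_TIMEOUT)
{
    /* nothing queued - do periodic housekeeping instead */
}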
300void queue_post(struct event_queue *q, long id, intptr_t data) 397void queue_post(struct event_queue *q, long id, intptr_t data)
301{ 398{
302 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 399 int oldlevel;
303 unsigned int wr; 400 unsigned int wr;
304 401
402 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
403 corelock_lock(&q->cl);
404
305 wr = q->write++ & QUEUE_LENGTH_MASK; 405 wr = q->write++ & QUEUE_LENGTH_MASK;
306 406
307 q->events[wr].id = id; 407 q->events[wr].id = id;
@@ -320,20 +420,24 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
320 } 420 }
321#endif 421#endif
322 422
323 wakeup_thread_irq_safe(&q->thread); 423 /* Wakeup a waiting thread if any */
424 wakeup_thread(&q->queue);
425
426 corelock_unlock(&q->cl);
324 set_irq_level(oldlevel); 427 set_irq_level(oldlevel);
325
326} 428}
327 429
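queue_post never blocks the caller, which is why it, rather than queue_send below, is the form to use where blocking is not an option; a minimal sketch with an invented message id:

#define EXAMPLE_MSG_REFRESH 1   /* hypothetical message id */

/* fire-and-forget: wakes the owning thread if it is waiting */
queue_post(&example_q, EXAMPLE_MSG_REFRESH, 0);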
328#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 430#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
329/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed 431/* IRQ handlers are not allowed use of this function - we only aim to
330 use of this function - we only aim to protect the queue integrity by 432 protect the queue integrity by turning them off. */
331 turning them off. */
332intptr_t queue_send(struct event_queue *q, long id, intptr_t data) 433intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
333{ 434{
334 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 435 int oldlevel;
335 unsigned int wr; 436 unsigned int wr;
336 437
438 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
439 corelock_lock(&q->cl);
440
337 wr = q->write++ & QUEUE_LENGTH_MASK; 441 wr = q->write++ & QUEUE_LENGTH_MASK;
338 442
339 q->events[wr].id = id; 443 q->events[wr].id = id;
@@ -341,21 +445,38 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
341 445
342 if(q->send) 446 if(q->send)
343 { 447 {
448 const unsigned int core = CURRENT_CORE;
344 struct thread_entry **spp = &q->send->senders[wr]; 449 struct thread_entry **spp = &q->send->senders[wr];
345 450
346 if (*spp) 451 if(*spp)
347 { 452 {
348 /* overflow protect - unblock any thread waiting at this index */ 453 /* overflow protect - unblock any thread waiting at this index */
349 queue_release_sender(spp, 0); 454 queue_release_sender(spp, 0);
350 } 455 }
351 456
352 wakeup_thread(&q->thread); 457 /* Wakeup a waiting thread if any */
353 set_irq_level_and_block_thread(spp, oldlevel); 458 wakeup_thread(&q->queue);
354 return thread_get_current()->retval; 459
460#if CONFIG_CORELOCK == CORELOCK_NONE
461 cores[core].irq_level = oldlevel;
462#elif CONFIG_CORELOCK == SW_CORELOCK
463 cores[core].blk_ops.irq_level = oldlevel;
464 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
465 cores[core].blk_ops.cl_p = &q->cl;
466#elif CONFIG_CORELOCK == CORELOCK_SWAP
467 cores[core].blk_ops.irq_level = oldlevel;
468 cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
469 cores[core].blk_ops.var_u8p = &q->cl.locked;
470 cores[core].blk_ops.var_u8v = 0;
471#endif
472 block_thread_no_listlock(spp);
473 return cores[core].running->retval;
355 } 474 }
356 475
357 /* Function as queue_post if sending is not enabled */ 476 /* Function as queue_post if sending is not enabled */
358 wakeup_thread(&q->thread); 477 wakeup_thread(&q->queue);
478
479 corelock_unlock(&q->cl);
359 set_irq_level(oldlevel); 480 set_irq_level(oldlevel);
360 481
361 return 0; 482 return 0;
@@ -365,21 +486,52 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
365/* Query if the last message dequeued was added by queue_send or not */ 486/* Query if the last message dequeued was added by queue_send or not */
366bool queue_in_queue_send(struct event_queue *q) 487bool queue_in_queue_send(struct event_queue *q)
367{ 488{
368 return q->send && q->send->curr_sender; 489 bool in_send;
490
491#if NUM_CORES > 1
492 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
493 corelock_lock(&q->cl);
494#endif
495
496 in_send = q->send && q->send->curr_sender;
497
498#if NUM_CORES > 1
499 corelock_unlock(&q->cl);
500 set_irq_level(oldlevel);
501#endif
502
503 return in_send;
369} 504}
370#endif 505#endif
371 506
372/* Replies with retval to any dequeued message sent with queue_send */ 507/* Replies with retval to the last dequeued message sent with queue_send */
373void queue_reply(struct event_queue *q, intptr_t retval) 508void queue_reply(struct event_queue *q, intptr_t retval)
374{ 509{
375 /* No IRQ lock here since IRQs cannot change this */
376 if(q->send && q->send->curr_sender) 510 if(q->send && q->send->curr_sender)
377 { 511 {
378 queue_release_sender(&q->send->curr_sender, retval); 512#if NUM_CORES > 1
513 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
514 corelock_lock(&q->cl);
515 /* Double-check locking */
516 if(q->send && q->send->curr_sender)
517 {
518#endif
519
520 queue_release_sender(&q->send->curr_sender, retval);
521
522#if NUM_CORES > 1
523 }
524 corelock_unlock(&q->cl);
525 set_irq_level(oldlevel);
526#endif
379 } 527 }
380} 528}
381#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ 529#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
382 530
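The send/reply pairing in sketch form (queue and ids as in the earlier hypothetical examples): the sender blocks until the queue owner replies, and queue_send returns the value passed to queue_reply.

/* sending thread - blocks until the owner replies (or the slot is reused) */
intptr_t result = queue_send(&example_q, EXAMPLE_MSG_REFRESH, 0);

/* owning thread, after dequeuing the message in its queue_wait() loop */
if(queue_in_queue_send(&example_q))
{
    queue_reply(&example_q, 1);   /* wakes the sender; result becomes 1 */
}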
531/* Poll queue to see if a message exists - careful in using the result if
532 * queue_remove_from_head is called when messages are posted - possibly use
533 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
534 * unsignals the queue may cause an unwanted block */
383bool queue_empty(const struct event_queue* q) 535bool queue_empty(const struct event_queue* q)
384{ 536{
385 return ( q->read == q->write ); 537 return ( q->read == q->write );
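Following the warning above, a zero-tick queue_wait_w_tmo is the safer way to poll when messages may be removed concurrently (sketch, same hypothetical queue):

struct queue_event ev;

/* returns immediately; ev.id is SYS_TIMEOUT if the queue held nothing */
queue_wait_w_tmo(&example_q, &ev, 0);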
@@ -387,23 +539,30 @@ bool queue_empty(const struct event_queue* q)
387 539
388void queue_clear(struct event_queue* q) 540void queue_clear(struct event_queue* q)
389{ 541{
390 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 542 int oldlevel;
543
544 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
545 corelock_lock(&q->cl);
391 546
392#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 547#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
393 /* Release all thread waiting in the queue for a reply - 548 /* Release all threads waiting in the queue for a reply -
394 dequeued sent message will be handled by owning thread */ 549 dequeued sent message will be handled by owning thread */
395 queue_release_all_senders(q); 550 queue_release_all_senders(q);
396#endif 551#endif
397 552
398 q->read = 0; 553 q->read = 0;
399 q->write = 0; 554 q->write = 0;
400 555
556 corelock_unlock(&q->cl);
401 set_irq_level(oldlevel); 557 set_irq_level(oldlevel);
402} 558}
403 559
404void queue_remove_from_head(struct event_queue *q, long id) 560void queue_remove_from_head(struct event_queue *q, long id)
405{ 561{
406 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 562 int oldlevel;
563
564 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
565 corelock_lock(&q->cl);
407 566
408 while(q->read != q->write) 567 while(q->read != q->write)
409 { 568 {
@@ -428,7 +587,8 @@ void queue_remove_from_head(struct event_queue *q, long id)
428#endif 587#endif
429 q->read++; 588 q->read++;
430 } 589 }
431 590
591 corelock_unlock(&q->cl);
432 set_irq_level(oldlevel); 592 set_irq_level(oldlevel);
433} 593}
434 594
@@ -446,13 +606,23 @@ int queue_count(const struct event_queue *q)
446int queue_broadcast(long id, intptr_t data) 606int queue_broadcast(long id, intptr_t data)
447{ 607{
448 int i; 608 int i;
609
610#if NUM_CORES > 1
611 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
612 corelock_lock(&all_queues.cl);
613#endif
449 614
450 for(i = 0;i < num_queues;i++) 615 for(i = 0;i < all_queues.count;i++)
451 { 616 {
452 queue_post(all_queues[i], id, data); 617 queue_post(all_queues.queues[i], id, data);
453 } 618 }
619
620#if NUM_CORES > 1
621 corelock_unlock(&all_queues.cl);
622 set_irq_level(oldlevel);
623#endif
454 624
455 return num_queues; 625 return i;
456} 626}
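queue_broadcast posts one event to every registered queue and now reports how many queues it reached, e.g. (id hypothetical):

int n = queue_broadcast(EXAMPLE_MSG_REFRESH, 0);
/* n is the number of registered queues that received the message */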
457 627
458/**************************************************************************** 628/****************************************************************************
@@ -567,6 +737,7 @@ void TIMER1(void)
567{ 737{
568 int i; 738 int i;
569 739
740 /* Run through the list of tick tasks (using main core) */
570 TIMER1_VAL; /* Read value to ack IRQ */ 741 TIMER1_VAL; /* Read value to ack IRQ */
571 742
572 /* Run through the list of tick tasks using main CPU core - 743 /* Run through the list of tick tasks using main CPU core -
@@ -580,24 +751,8 @@ void TIMER1(void)
580 } 751 }
581 752
582#if NUM_CORES > 1 753#if NUM_CORES > 1
583#ifdef CPU_PP502x 754 /* Pulse the COP */
584 { 755 core_wake(COP);
585 /* If COP is sleeping - give it a kick */
586 /* TODO: Use a mailbox in addition to make sure it doesn't go to
587 * sleep if kicked just as it's headed to rest to make sure its
588 * tick checks won't be jittery. Don't bother at all if it owns no
589 * threads. */
590 unsigned int cop_ctl;
591
592 cop_ctl = COP_CTL;
593 if (cop_ctl & PROC_SLEEP)
594 {
595 COP_CTL = cop_ctl & ~PROC_SLEEP;
596 }
597 }
598#else
599 /* TODO: PP5002 */
600#endif
601#endif /* NUM_CORES */ 756#endif /* NUM_CORES */
602 757
603 current_tick++; 758 current_tick++;
@@ -837,49 +992,391 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
837 992
838#endif /* INCLUDE_TIMEOUT_API */ 993#endif /* INCLUDE_TIMEOUT_API */
839 994
840#ifndef SIMULATOR
841/*
842 * Simulator versions in uisimulator/SIMVER/
843 */
844
845/**************************************************************************** 995/****************************************************************************
846 * Simple mutex functions 996 * Simple mutex functions ;)
847 ****************************************************************************/ 997 ****************************************************************************/
848void mutex_init(struct mutex *m) 998void mutex_init(struct mutex *m)
849{ 999{
850 m->locked = false; 1000 m->queue = NULL;
851 m->thread = NULL; 1001 m->thread = NULL;
1002 m->count = 0;
1003 m->locked = 0;
1004#if CONFIG_CORELOCK == SW_CORELOCK
1005 corelock_init(&m->cl);
1006#endif
852} 1007}
853 1008
854void mutex_lock(struct mutex *m) 1009void mutex_lock(struct mutex *m)
855{ 1010{
856 if (test_and_set(&m->locked, 1)) 1011 const unsigned int core = CURRENT_CORE;
1012 struct thread_entry *const thread = cores[core].running;
1013
1014 if(thread == m->thread)
857 { 1015 {
858 /* Wait until the lock is open... */ 1016 m->count++;
859 block_thread(&m->thread); 1017 return;
860 } 1018 }
1019
1020 /* Repeat some stuff here or else all the variation is too difficult to
1021 read */
1022#if CONFIG_CORELOCK == CORELOCK_SWAP
1023 /* peek at lock until it's no longer busy */
1024 unsigned int locked;
1025 while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
1026 if(locked == 0)
1027 {
1028 m->thread = thread;
1029 m->locked = 1;
1030 return;
1031 }
1032
1033 /* Block until the lock is open... */
1034 cores[core].blk_ops.flags = TBOP_SET_VARu8;
1035 cores[core].blk_ops.var_u8p = &m->locked;
1036 cores[core].blk_ops.var_u8v = 1;
1037#else
1038 corelock_lock(&m->cl);
1039 if (m->locked == 0)
1040 {
1041 m->locked = 1;
1042 m->thread = thread;
1043 corelock_unlock(&m->cl);
1044 return;
1045 }
1046
1047 /* Block until the lock is open... */
1048#if CONFIG_CORELOCK == SW_CORELOCK
1049 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
1050 cores[core].blk_ops.cl_p = &m->cl;
1051#endif
1052#endif /* CONFIG_CORELOCK */
1053
1054 block_thread_no_listlock(&m->queue);
861} 1055}
862 1056
863void mutex_unlock(struct mutex *m) 1057void mutex_unlock(struct mutex *m)
864{ 1058{
865 if (m->thread == NULL) 1059 /* unlocker not being the owner is an unlocking violation */
866 m->locked = 0; 1060 KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
1061 "mutex_unlock->wrong thread (recurse)");
1062
1063 if(m->count > 0)
1064 {
1065 /* this thread still owns lock */
1066 m->count--;
1067 return;
1068 }
1069
1070#if CONFIG_CORELOCK == SW_CORELOCK
1071 /* lock out other cores */
1072 corelock_lock(&m->cl);
1073#elif CONFIG_CORELOCK == CORELOCK_SWAP
1074 /* wait for peeker to move on */
1075 while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
1076#endif
1077
1078 /* transfer to next queued thread if any */
1079 m->thread = wakeup_thread_no_listlock(&m->queue);
1080
1081 if(m->thread == NULL)
1082 {
1083 m->locked = 0; /* release lock */
1084#if CONFIG_CORELOCK == SW_CORELOCK
1085 corelock_unlock(&m->cl);
1086#endif
1087 }
1088 else /* another thread is waiting - remain locked */
1089 {
1090#if CONFIG_CORELOCK == SW_CORELOCK
1091 corelock_unlock(&m->cl);
1092#elif CONFIG_CORELOCK == CORELOCK_SWAP
1093 m->locked = 1;
1094#endif
1095 }
1096}
1097
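The mutex is now recursive for its owner: nested mutex_lock calls from the same thread only bump a count and must be balanced by the same number of mutex_unlock calls. A minimal sketch (names hypothetical):

static struct mutex example_mtx;  /* mutex_init(&example_mtx) done once at startup */
static int shared_value;

static void example_mutex_use(void)
{
    mutex_lock(&example_mtx);
    mutex_lock(&example_mtx);     /* same owner: just increments the count */
    shared_value++;
    mutex_unlock(&example_mtx);   /* count drops, lock still held */
    mutex_unlock(&example_mtx);   /* lock actually released here */
}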
1098/****************************************************************************
1099 * Simpl-er mutex functions ;)
1100 ****************************************************************************/
1101void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags))
1102{
1103 l->locked = 0;
1104 l->thread = NULL;
1105 l->count = 0;
1106#if NUM_CORES > 1
1107 l->task_switch = flags & SPINLOCK_TASK_SWITCH;
1108 corelock_init(&l->cl);
1109#endif
1110}
1111
1112void spinlock_lock(struct spinlock *l)
1113{
1114 struct thread_entry *const thread = cores[CURRENT_CORE].running;
1115
1116 if (l->thread == thread)
1117 {
1118 l->count++;
1119 return;
1120 }
1121
1122#if NUM_CORES > 1
1123 if (l->task_switch != 0)
1124#endif
1125 {
1126 /* Let other threads run until the lock is free */
1127 while(test_and_set(&l->locked, 1, &l->cl) != 0)
1128 {
1129 /* spin and switch until the lock is open... */
1130 switch_thread(NULL);
1131 }
1132 }
1133#if NUM_CORES > 1
867 else 1134 else
868 wakeup_thread(&m->thread); 1135 {
1136 /* Use the corelock purely */
1137 corelock_lock(&l->cl);
1138 }
1139#endif
1140
1141 l->thread = thread;
869} 1142}
870 1143
871void spinlock_lock(struct mutex *m) 1144void spinlock_unlock(struct spinlock *l)
872{ 1145{
873 while (test_and_set(&m->locked, 1)) 1146 /* unlocker not being the owner is an unlocking violation */
1147 KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
1148 "spinlock_unlock->wrong thread");
1149
1150 if (l->count > 0)
1151 {
1152 /* this thread still owns lock */
1153 l->count--;
1154 return;
1155 }
1156
1157 /* clear owner */
1158 l->thread = NULL;
1159
1160#if NUM_CORES > 1
1161 if (l->task_switch != 0)
1162#endif
874 { 1163 {
875 /* wait until the lock is open... */ 1164 /* release lock */
876 switch_thread(true, NULL); 1165#if CONFIG_CORELOCK == SW_CORELOCK
1166 /* This must be done since our unlock could be missed by the
1167 test_and_set and leave the object locked permanently */
1168 corelock_lock(&l->cl);
1169#endif
1170 l->locked = 0;
877 } 1171 }
1172
1173#if NUM_CORES > 1
1174 corelock_unlock(&l->cl);
1175#endif
878} 1176}
879 1177
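spinlock_init now takes a flags argument on multi-core builds, wrapped in IF_COP: with SPINLOCK_TASK_SWITCH the lock yields between attempts, without it the corelock alone is used. A hedged sketch:

static struct spinlock example_sl;

static void example_spinlock_use(void)
{
    /* IF_COP() drops the flags argument on single-core targets */
    spinlock_init(&example_sl IF_COP(, SPINLOCK_TASK_SWITCH));

    spinlock_lock(&example_sl);
    /* ...short critical section... */
    spinlock_unlock(&example_sl);
}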
880void spinlock_unlock(struct mutex *m) 1178/****************************************************************************
1179 * Simple semaphore functions ;)
1180 ****************************************************************************/
1181#ifdef HAVE_SEMAPHORE_OBJECTS
1182void semaphore_init(struct semaphore *s, int max, int start)
881{ 1183{
882 m->locked = 0; 1184 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
1185 "semaphore_init->inv arg");
1186 s->queue = NULL;
1187 s->max = max;
1188 s->count = start;
1189#if CONFIG_CORELOCK == SW_CORELOCK
1190 corelock_init(&s->cl);
1191#endif
883} 1192}
884 1193
885#endif /* ndef SIMULATOR */ 1194void semaphore_wait(struct semaphore *s)
1195{
1196#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
1197 corelock_lock(&s->cl);
1198 if(--s->count >= 0)
1199 {
1200 corelock_unlock(&s->cl);
1201 return;
1202 }
1203#elif CONFIG_CORELOCK == CORELOCK_SWAP
1204 int count;
1205 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1206 if(--count >= 0)
1207 {
1208 s->count = count;
1209 return;
1210 }
1211#endif
1212
1213 /* too many waits - block until dequeued */
1214#if CONFIG_CORELOCK == SW_CORELOCK
1215 const unsigned int core = CURRENT_CORE;
1216 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
1217 cores[core].blk_ops.cl_p = &s->cl;
1218#elif CONFIG_CORELOCK == CORELOCK_SWAP
1219 const unsigned int core = CURRENT_CORE;
1220 cores[core].blk_ops.flags = TBOP_SET_VARi;
1221 cores[core].blk_ops.var_ip = &s->count;
1222 cores[core].blk_ops.var_iv = count;
1223#endif
1224 block_thread_no_listlock(&s->queue);
1225}
1226
1227void semaphore_release(struct semaphore *s)
1228{
1229#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
1230 corelock_lock(&s->cl);
1231 if (s->count < s->max)
1232 {
1233 if (++s->count <= 0)
1234 {
1235#elif CONFIG_CORELOCK == CORELOCK_SWAP
1236 int count;
1237 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1238 if(count < s->max)
1239 {
1240 if(++count <= 0)
1241 {
1242#endif /* CONFIG_CORELOCK */
1243
1244 /* there should be threads in this queue */
1245 KERNEL_ASSERT(s->queue.queue != NULL, "semaphore->wakeup");
1246 /* a thread was queued - wake it up */
1247 wakeup_thread_no_listlock(&s->queue);
1248 }
1249 }
1250
1251#if CONFIG_CORELOCK == SW_CORELOCK
1252 corelock_unlock(&s->cl);
1253#elif CONFIG_CORELOCK == CORELOCK_SWAP
1254 s->count = count;
1255#endif
1256}
1257#endif /* HAVE_SEMAPHORE_OBJECTS */
1258
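A small producer/consumer sketch for the semaphore object (only compiled with HAVE_SEMAPHORE_OBJECTS; roles and limits below are illustrative):

static struct semaphore example_sem;

static void example_sem_setup(void)
{
    /* allow at most 10 queued signals, none available to begin with */
    semaphore_init(&example_sem, 10, 0);
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem);     /* blocks until a signal is available */
    /* ...handle one unit of work... */
}

static void example_producer(void)
{
    semaphore_release(&example_sem);  /* wakes one waiter, capped at max */
}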
1259/****************************************************************************
1260 * Simple event functions ;)
1261 ****************************************************************************/
1262#ifdef HAVE_EVENT_OBJECTS
1263void event_init(struct event *e, unsigned int flags)
1264{
1265 e->queues[STATE_NONSIGNALED] = NULL;
1266 e->queues[STATE_SIGNALED] = NULL;
1267 e->state = flags & STATE_SIGNALED;
1268 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
1269#if CONFIG_CORELOCK == SW_CORELOCK
1270 corelock_init(&e->cl);
1271#endif
1272}
1273
1274void event_wait(struct event *e, unsigned int for_state)
1275{
1276 unsigned int last_state;
1277#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
1278 corelock_lock(&e->cl);
1279 last_state = e->state;
1280#elif CONFIG_CORELOCK == CORELOCK_SWAP
1281 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1282#endif
1283
1284 if(e->automatic != 0)
1285 {
1286 /* wait for false always satisfied by definition
1287 or if it just changed to false */
1288 if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
1289 {
1290 /* automatic - unsignal */
1291 e->state = STATE_NONSIGNALED;
1292#if CONFIG_CORELOCK == SW_CORELOCK
1293 corelock_unlock(&e->cl);
1294#endif
1295 return;
1296 }
1297 /* block until state matches */
1298 }
1299 else if(for_state == last_state)
1300 {
1301 /* the state being waited for is the current state */
1302#if CONFIG_CORELOCK == SW_CORELOCK
1303 corelock_unlock(&e->cl);
1304#elif CONFIG_CORELOCK == CORELOCK_SWAP
1305 e->state = last_state;
1306#endif
1307 return;
1308 }
1309
1310 {
1311 /* current state does not match wait-for state */
1312#if CONFIG_CORELOCK == SW_CORELOCK
1313 const unsigned int core = CURRENT_CORE;
1314 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
1315 cores[core].blk_ops.cl_p = &e->cl;
1316#elif CONFIG_CORELOCK == CORELOCK_SWAP
1317 const unsigned int core = CURRENT_CORE;
1318 cores[core].blk_ops.flags = TBOP_SET_VARu8;
1319 cores[core].blk_ops.var_u8p = &e->state;
1320 cores[core].blk_ops.var_u8v = last_state;
1321#endif
1322 block_thread_no_listlock(&e->queues[for_state]);
1323 }
1324}
1325
1326void event_set_state(struct event *e, unsigned int state)
1327{
1328 unsigned int last_state;
1329#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
1330 corelock_lock(&e->cl);
1331 last_state = e->state;
1332#elif CONFIG_CORELOCK == CORELOCK_SWAP
1333 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1334#endif
1335
1336 if(last_state == state)
1337 {
1338 /* no change */
1339#if CONFIG_CORELOCK == SW_CORELOCK
1340 corelock_unlock(&e->cl);
1341#elif CONFIG_CORELOCK == CORELOCK_SWAP
1342 e->state = last_state;
1343#endif
1344 return;
1345 }
1346
1347 if(state == STATE_SIGNALED)
1348 {
1349 if(e->automatic != 0)
1350 {
1351 struct thread_entry *thread;
1352 /* no thread should have ever blocked for unsignaled */
1353 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL,
1354 "set_event_state->queue[NS]:S");
1355 /* pass to next thread and keep unsignaled - "pulse" */
1356 thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
1357 e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
1358 }
1359 else
1360 {
1361 /* release all threads waiting for signaled */
1362 thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
1363 e->state = STATE_SIGNALED;
1364 }
1365 }
1366 else
1367 {
1368 /* release all threads waiting for unsignaled */
1369
1370 /* no thread should have ever blocked if automatic */
1371 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL ||
1372 e->automatic == 0, "set_event_state->queue[NS]:NS");
1373
1374 thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
1375 e->state = STATE_NONSIGNALED;
1376 }
1377
1378#if CONFIG_CORELOCK == SW_CORELOCK
1379 corelock_unlock(&e->cl);
1380#endif
1381}
1382#endif /* HAVE_EVENT_OBJECTS */
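And a hedged sketch of the event object (compiled with HAVE_EVENT_OBJECTS): an EVENT_AUTOMATIC event releases one waiter per signal and re-arms itself, while a manual one stays signaled and wakes every waiter.

static struct event example_ev;

static void example_event_setup(void)
{
    /* automatic, starting out unsignaled */
    event_init(&example_ev, EVENT_AUTOMATIC);
}

static void example_waiter(void)
{
    event_wait(&example_ev, STATE_SIGNALED);      /* block until signaled */
    /* ...the condition now holds for this thread... */
}

static void example_signaler(void)
{
    event_set_state(&example_ev, STATE_SIGNALED); /* wake one waiter (pulse) */
}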