Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c | 136
1 file changed, 23 insertions(+), 113 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 11b10e287e..77b0c6ffef 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -46,16 +46,21 @@ void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;
 void kernel_init(void)
 {
     /* Init the threading API */
-    init_threads();
-
-    if(CURRENT_CORE == CPU)
+#if NUM_CORES > 1
+    if (CURRENT_CORE == COP)
     {
-        memset(tick_funcs, 0, sizeof(tick_funcs));
-
-        num_queues = 0;
-        memset(all_queues, 0, sizeof(all_queues));
+        /* This enables the interrupt but it won't be active until
+           the timer is actually started and interrupts are unmasked */
+        tick_start(1000/HZ);
     }
+#endif
+
+    init_threads();
 
+    /* No processor other than the CPU will proceed here */
+    memset(tick_funcs, 0, sizeof(tick_funcs));
+    num_queues = 0;
+    memset(all_queues, 0, sizeof(all_queues));
     tick_start(1000/HZ);
 }
 
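The effect of the hunk above is easier to see as a whole function. Reconstructed purely from its + and - lines, kernel_init() now reads:

void kernel_init(void)
{
    /* Init the threading API */
#if NUM_CORES > 1
    if (CURRENT_CORE == COP)
    {
        /* This enables the interrupt but it won't be active until
           the timer is actually started and interrupts are unmasked */
        tick_start(1000/HZ);
    }
#endif

    init_threads();

    /* No processor other than the CPU will proceed here */
    memset(tick_funcs, 0, sizeof(tick_funcs));
    num_queues = 0;
    memset(all_queues, 0, sizeof(all_queues));
    tick_start(1000/HZ);
}

On a dual-core (NUM_CORES > 1) target the coprocessor arms its own tick timer first; the "No processor other than the CPU will proceed here" comment indicates that init_threads() does not return on the COP, so only the main CPU reaches the tick_funcs/all_queues setup.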
@@ -172,9 +177,6 @@ void queue_init(struct event_queue *q, bool register_queue)
     q->read = 0;
     q->write = 0;
     q->thread = NULL;
-#if NUM_CORES > 1
-    q->irq_safe = false;
-#endif
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
 #endif
@@ -186,29 +188,12 @@ void queue_init(struct event_queue *q, bool register_queue)
     }
 }
 
-#if NUM_CORES > 1
-/**
- * If IRQ mode is enabled, some core-wise locking mechanisms are disabled
- * causing accessing queue to be no longer thread safe from the other core.
- * However, that locking mechanism would also kill IRQ handlers.
- *
- * @param q struct of an event_queue
- * @param state enable/disable IRQ mode
- * @default state disabled
- */
-void queue_set_irq_safe(struct event_queue *q, bool state)
-{
-    q->irq_safe = state;
-}
-#endif
-
 void queue_delete(struct event_queue *q)
 {
     int i;
     bool found = false;
 
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    lock_cores();
 
     /* Release threads waiting on queue */
     wakeup_thread(&q->thread);
@@ -241,7 +226,6 @@ void queue_delete(struct event_queue *q)
         num_queues--;
     }
 
-    unlock_cores();
     set_irq_level(oldlevel);
 }
 
@@ -251,13 +235,11 @@ void queue_wait(struct event_queue *q, struct event *ev)
     unsigned int rd;
 
     oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    lock_cores();
 
     if (q->read == q->write)
     {
         set_irq_level_and_block_thread(&q->thread, oldlevel);
         oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-        lock_cores();
     }
 
     rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -271,20 +253,17 @@ void queue_wait(struct event_queue *q, struct event *ev)
     }
 #endif
 
-    unlock_cores();
     set_irq_level(oldlevel);
 }
 
 void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    lock_cores();
 
     if (q->read == q->write && ticks > 0)
     {
         set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
         oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-        lock_cores();
     }
 
     if (q->read != q->write)
@@ -305,7 +284,6 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
         ev->id = SYS_TIMEOUT;
     }
 
-    unlock_cores();
     set_irq_level(oldlevel);
 }
 
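Every queue operation in this patch collapses the old two-step locking (mask IRQs, then take the cross-core lock) into IRQ masking alone. A minimal sketch of the resulting critical-section pattern, using only names that appear in this diff (the wrapper function itself is hypothetical):

void queue_critical_section_sketch(struct event_queue *q)
{
    /* mask interrupts; the previous level is returned for restoration */
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* ... read or update q->read / q->write here ... */

    set_irq_level(oldlevel);    /* restore the caller's IRQ level */
}

The blocking paths (queue_wait, queue_wait_w_tmo) differ only in that set_irq_level_and_block_thread() drops back to oldlevel while the thread sleeps, after which the function re-raises the level and re-checks the queue, as the hunks above show.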
@@ -314,11 +292,6 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     unsigned int wr;
 
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        lock_cores();
-#endif
-
     wr = q->write++ & QUEUE_LENGTH_MASK;
 
     q->events[wr].id = id;
@@ -338,10 +311,6 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
 #endif
 
     wakeup_thread_irq_safe(&q->thread);
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        unlock_cores();
-#endif
     set_irq_level(oldlevel);
 
 }
@@ -355,8 +324,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
     unsigned int wr;
 
-    lock_cores();
-
     wr = q->write++ & QUEUE_LENGTH_MASK;
 
     q->events[wr].id = id;
@@ -379,7 +346,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
 
     /* Function as queue_post if sending is not enabled */
     wakeup_thread(&q->thread);
-    unlock_cores();
     set_irq_level(oldlevel);
 
     return 0;
@@ -396,43 +362,23 @@ bool queue_in_queue_send(struct event_queue *q)
 /* Replies with retval to any dequeued message sent with queue_send */
 void queue_reply(struct event_queue *q, intptr_t retval)
 {
-    lock_cores();
     /* No IRQ lock here since IRQs cannot change this */
     if(q->send && q->send->curr_sender)
     {
         queue_release_sender(&q->send->curr_sender, retval);
     }
-    unlock_cores();
 }
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
 bool queue_empty(const struct event_queue* q)
 {
-    bool is_empty;
-
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        lock_cores();
-#endif
-
-    is_empty = ( q->read == q->write );
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        unlock_cores();
-#endif
-
-    return is_empty;
+    return ( q->read == q->write );
 }
 
 void queue_clear(struct event_queue* q)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
 
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        lock_cores();
-#endif
-
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     /* Release all threads waiting in the queue for a reply -
        dequeued sent message will be handled by owning thread */
@@ -442,11 +388,6 @@ void queue_clear(struct event_queue* q)
     q->read = 0;
     q->write = 0;
 
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        unlock_cores();
-#endif
-
     set_irq_level(oldlevel);
 }
 
@@ -454,11 +395,6 @@ void queue_remove_from_head(struct event_queue *q, long id)
 {
     int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
 
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        lock_cores();
-#endif
-
     while(q->read != q->write)
     {
         unsigned int rd = q->read & QUEUE_LENGTH_MASK;
@@ -483,11 +419,6 @@ void queue_remove_from_head(struct event_queue *q, long id)
         q->read++;
     }
 
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        unlock_cores();
-#endif
-
     set_irq_level(oldlevel);
 }
 
@@ -499,24 +430,7 @@ void queue_remove_from_head(struct event_queue *q, long id)
  */
 int queue_count(const struct event_queue *q)
 {
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-    int result;
-
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        lock_cores();
-#endif
-
-    result = q->write - q->read;
-
-#if NUM_CORES > 1
-    if (!q->irq_safe)
-        unlock_cores();
-#endif
-
-    set_irq_level(oldlevel);
-
-    return result;
+    return q->write - q->read;
 }
 
 int queue_broadcast(long id, intptr_t data)
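queue_count() can reduce to an unlocked subtraction because q->read and q->write are free-running counters that are only masked with QUEUE_LENGTH_MASK when indexing q->events[] (see the q->read++ and q->write++ lines in the hunks above). Assuming the fields are unsigned, as the local rd/wr variables in this diff suggest, modular arithmetic keeps the difference correct even across wraparound; a small illustration with made-up counter values:

unsigned int read  = 0xfffffffeu;  /* hypothetical counter states */
unsigned int write = 0x00000001u;
int count = write - read;          /* == 3: three events queued */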
@@ -644,22 +558,22 @@ void TIMER1(void)
     int i;
 
     TIMER1_VAL; /* Read value to ack IRQ */
-    /* Run through the list of tick tasks (using main core) */
-    if (CURRENT_CORE == CPU)
+
+    /* Run through the list of tick tasks (using main core -
+       COP does not dispatch ticks to this subroutine) */
+    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
     {
-        for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
+        if (tick_funcs[i])
         {
-            if (tick_funcs[i])
-            {
-                tick_funcs[i]();
-            }
+            tick_funcs[i]();
         }
-
-        current_tick++;
     }
+
+    current_tick++;
 }
 #endif
 
+/* Must be the last function called in kernel/thread initialization */
 void tick_start(unsigned int interval_in_ms)
 {
 #ifndef BOOTLOADER
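For context, the tick_funcs[] slots that TIMER1() walks hold plain void(void) callbacks. They are registered elsewhere in kernel.c via tick_add_task(), which is not part of this diff, so treat its exact signature as an assumption; a hypothetical task looks like:

static void example_tick_task(void)
{
    /* Runs in interrupt context on the main CPU once per tick
       (HZ times per second), so it must be short and non-blocking. */
}

/* somewhere during init, after kernel_init():
   tick_add_task(example_tick_task); */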
@@ -922,14 +836,10 @@ void mutex_lock(struct mutex *m)
 
 void mutex_unlock(struct mutex *m)
 {
-    lock_cores();
-
     if (m->thread == NULL)
         m->locked = 0;
     else
         wakeup_thread(&m->thread);
-
-    unlock_cores();
 }
 
 void spinlock_lock(struct mutex *m)
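The body that remains in mutex_unlock() encodes a direct-handoff unlock: if no thread is blocked on the mutex (m->thread == NULL), the lock word is cleared; otherwise ownership passes straight to a waiter via wakeup_thread() and m->locked stays set. A hypothetical caller, using only the two functions named in this hunk (mutex initialization happens elsewhere in kernel.c and is assumed here):

static struct mutex example_mtx;    /* assumed already initialized */

void example_critical(void)
{
    mutex_lock(&example_mtx);       /* may block the current thread */
    /* ... exclusive section ... */
    mutex_unlock(&example_mtx);     /* wakes one waiter, if any */
}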