Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c  1329
1 file changed, 687 insertions, 642 deletions
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 835181f1ae..47c0d58a95 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -20,21 +20,30 @@
20#include <string.h> 20#include <string.h>
21#include "config.h" 21#include "config.h"
22#include "kernel.h" 22#include "kernel.h"
23#ifdef SIMULATOR
24#include "system-sdl.h"
25#include "debug.h"
26#endif
23#include "thread.h" 27#include "thread.h"
24#include "cpu.h" 28#include "cpu.h"
25#include "system.h" 29#include "system.h"
26#include "panic.h" 30#include "panic.h"
27 31
28/* Make this nonzero to enable more elaborate checks on objects */ 32/* Make this nonzero to enable more elaborate checks on objects */
29#ifdef DEBUG 33#if defined(DEBUG) || defined(SIMULATOR)
30#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */ 34#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
31#else 35#else
32#define KERNEL_OBJECT_CHECKS 0 36#define KERNEL_OBJECT_CHECKS 0
33#endif 37#endif
34 38
35#if KERNEL_OBJECT_CHECKS 39#if KERNEL_OBJECT_CHECKS
40#ifdef SIMULATOR
41#define KERNEL_ASSERT(exp, msg...) \
42 ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
43#else
36#define KERNEL_ASSERT(exp, msg...) \ 44#define KERNEL_ASSERT(exp, msg...) \
37 ({ if (!({ exp; })) panicf(msg); }) 45 ({ if (!({ exp; })) panicf(msg); })
46#endif
38#else 47#else
39#define KERNEL_ASSERT(exp, msg...) ({}) 48#define KERNEL_ASSERT(exp, msg...) ({})
40#endif 49#endif
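
In both variants KERNEL_ASSERT keeps the variadic panicf()/DEBUGF() calling convention, so call sites pass a condition plus a printf-style message; the simulator build now reports via DEBUGF() and exits instead of panicking. A minimal, hypothetical call site for illustration (the function name, condition and message are not from this patch):

    /* Illustrative only: validate an argument where object checks are compiled in. */
    static void my_queue_sanity_check(struct event_queue *q)
    {
        KERNEL_ASSERT(q != NULL, "my_queue_sanity_check->NULL queue\n");
    }
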
@@ -52,9 +61,7 @@ static struct
52{ 61{
53 int count; 62 int count;
54 struct event_queue *queues[MAX_NUM_QUEUES]; 63 struct event_queue *queues[MAX_NUM_QUEUES];
55#if NUM_CORES > 1 64 IF_COP( struct corelock cl; )
56 struct corelock cl;
57#endif
58} all_queues NOCACHEBSS_ATTR; 65} all_queues NOCACHEBSS_ATTR;
59 66
60/**************************************************************************** 67/****************************************************************************
@@ -77,6 +84,334 @@ void kernel_init(void)
77 } 84 }
78} 85}
79 86
87/****************************************************************************
88 * Timer tick
89 ****************************************************************************/
90#if CONFIG_CPU == SH7034
91void tick_start(unsigned int interval_in_ms)
92{
93 unsigned long count;
94
95 count = CPU_FREQ * interval_in_ms / 1000 / 8;
96
97 if(count > 0x10000)
98 {
99 panicf("Error! The tick interval is too long (%d ms)\n",
100 interval_in_ms);
101 return;
102 }
103
104 /* We are using timer 0 */
105
106 TSTR &= ~0x01; /* Stop the timer */
107 TSNC &= ~0x01; /* No synchronization */
108 TMDR &= ~0x01; /* Operate normally */
109
110 TCNT0 = 0; /* Start counting at 0 */
111 GRA0 = (unsigned short)(count - 1);
112 TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
113
114 /* Enable interrupt on level 1 */
115 IPRC = (IPRC & ~0x00f0) | 0x0010;
116
117 TSR0 &= ~0x01;
118 TIER0 = 0xf9; /* Enable GRA match interrupt */
119
120 TSTR |= 0x01; /* Start timer 1 */
121}
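
For a sense of scale (illustrative figures only, since CPU_FREQ is target-specific): with a hypothetical 12 MHz system clock and a 10 ms tick, count = 12000000 * 10 / 1000 / 8 = 15000, comfortably below the 0x10000 limit checked above, so count - 1 fits in the 16-bit GRA0 register.
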
122
123void IMIA0(void) __attribute__ ((interrupt_handler));
124void IMIA0(void)
125{
126 int i;
127
128 /* Run through the list of tick tasks */
129 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
130 {
131 if(tick_funcs[i])
132 {
133 tick_funcs[i]();
134 }
135 }
136
137 current_tick++;
138
139 TSR0 &= ~0x01;
140}
141#elif defined(CPU_COLDFIRE)
142void tick_start(unsigned int interval_in_ms)
143{
144 unsigned long count;
145 int prescale;
146
147 count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
148
149 if(count > 0x10000)
150 {
151 panicf("Error! The tick interval is too long (%d ms)\n",
152 interval_in_ms);
153 return;
154 }
155
156 prescale = cpu_frequency / CPU_FREQ;
157 /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
158 changes within timer.c */
159
160 /* We are using timer 0 */
161
162 TRR0 = (unsigned short)(count - 1); /* The reference count */
163 TCN0 = 0; /* reset the timer */
164 TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
165 /* restart, CLK/16, enabled, prescaler */
166
167 TER0 = 0xff; /* Clear all events */
168
169 ICR1 = 0x8c; /* Interrupt on level 3.0 */
170 IMR &= ~0x200;
171}
172
173void TIMER0(void) __attribute__ ((interrupt_handler));
174void TIMER0(void)
175{
176 int i;
177
178 /* Run through the list of tick tasks */
179 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
180 {
181 if(tick_funcs[i])
182 {
183 tick_funcs[i]();
184 }
185 }
186
187 current_tick++;
188
189 TER0 = 0xff; /* Clear all events */
190}
191
192#elif defined(CPU_PP)
193
194#ifndef BOOTLOADER
195void TIMER1(void)
196{
197 int i;
198
199 /* Run through the list of tick tasks (using main core) */
200 TIMER1_VAL; /* Read value to ack IRQ */
201
202 /* Run through the list of tick tasks using main CPU core -
203 wake up the COP through its control interface to provide pulse */
204 for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
205 {
206 if (tick_funcs[i])
207 {
208 tick_funcs[i]();
209 }
210 }
211
212#if NUM_CORES > 1
213 /* Pulse the COP */
214 core_wake(COP);
215#endif /* NUM_CORES */
216
217 current_tick++;
218}
219#endif
220
221/* Must be the last function called in kernel/thread initialization */
222void tick_start(unsigned int interval_in_ms)
223{
224#ifndef BOOTLOADER
225 TIMER1_CFG = 0x0;
226 TIMER1_VAL;
227 /* enable timer */
228 TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
229 /* unmask interrupt source */
230 CPU_INT_EN = TIMER1_MASK;
231#else
232 /* We don't enable interrupts in the bootloader */
233 (void)interval_in_ms;
234#endif
235}
236
237#elif CONFIG_CPU == PNX0101
238
239void timer_handler(void)
240{
241 int i;
242
243 /* Run through the list of tick tasks */
244 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
245 {
246 if(tick_funcs[i])
247 tick_funcs[i]();
248 }
249
250 current_tick++;
251
252 TIMER0.clr = 0;
253}
254
255void tick_start(unsigned int interval_in_ms)
256{
257 TIMER0.ctrl &= ~0x80; /* Disable the counter */
258 TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
259 TIMER0.load = 3000000 * interval_in_ms / 1000;
260 TIMER0.ctrl &= ~0xc; /* No prescaler */
261 TIMER0.clr = 1; /* Clear the interrupt request */
262
263 irq_set_int_handler(IRQ_TIMER0, timer_handler);
264 irq_enable_int(IRQ_TIMER0);
265
266 TIMER0.ctrl |= 0x80; /* Enable the counter */
267}
268#endif
269
270int tick_add_task(void (*f)(void))
271{
272 int i;
273 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
274
275 /* Add a task if there is room */
276 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
277 {
278 if(tick_funcs[i] == NULL)
279 {
280 tick_funcs[i] = f;
281 set_irq_level(oldlevel);
282 return 0;
283 }
284 }
285 set_irq_level(oldlevel);
286 panicf("Error! tick_add_task(): out of tasks");
287 return -1;
288}
289
290int tick_remove_task(void (*f)(void))
291{
292 int i;
293 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
294
295 /* Remove a task if it is there */
296 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
297 {
298 if(tick_funcs[i] == f)
299 {
300 tick_funcs[i] = NULL;
301 set_irq_level(oldlevel);
302 return 0;
303 }
304 }
305
306 set_irq_level(oldlevel);
307 return -1;
308}
309
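
The tick task interface above registers bare void (*)(void) callbacks that the tick interrupt handlers walk through once per tick, so handlers must stay short. A hedged registration sketch (the counter, callback and init/shutdown names are invented for illustration):

    static volatile long my_tick_count;          /* hypothetical per-tick counter */

    static void my_tick_cb(void)
    {
        my_tick_count++;                         /* runs in interrupt context - keep it brief */
    }

    static void my_module_init(void)
    {
        tick_add_task(my_tick_cb);               /* call after kernel_init()/tick_start() */
    }

    static void my_module_shutdown(void)
    {
        tick_remove_task(my_tick_cb);            /* unregister when no longer needed */
    }
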
310/****************************************************************************
311 * Tick-based interval timers/one-shots - be mindful this is not really
312 * intended for continuous timers but for events that need to run for a short
313 * time and be cancelled without further software intervention.
314 ****************************************************************************/
315#ifdef INCLUDE_TIMEOUT_API
316static struct timeout *tmo_list = NULL; /* list of active timeout events */
317
318/* timeout tick task - calls event handlers when they expire
319 * Event handlers may alter ticks, callback and data during operation.
320 */
321static void timeout_tick(void)
322{
323 unsigned long tick = current_tick;
324 struct timeout *curr, *next;
325
326 for (curr = tmo_list; curr != NULL; curr = next)
327 {
328 next = (struct timeout *)curr->next;
329
330 if (TIME_BEFORE(tick, curr->expires))
331 continue;
332
333 /* this event has expired - call callback */
334 if (curr->callback(curr))
335 *(long *)&curr->expires = tick + curr->ticks; /* reload */
336 else
337 timeout_cancel(curr); /* cancel */
338 }
339}
340
341/* Cancels a timeout callback - can be called from the ISR */
342void timeout_cancel(struct timeout *tmo)
343{
344 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
345
346 if (tmo_list != NULL)
347 {
348 struct timeout *curr = tmo_list;
349 struct timeout *prev = NULL;
350
351 while (curr != tmo && curr != NULL)
352 {
353 prev = curr;
354 curr = (struct timeout *)curr->next;
355 }
356
357 if (curr != NULL)
358 {
359 /* in list */
360 if (prev == NULL)
361 tmo_list = (struct timeout *)curr->next;
362 else
363 *(const struct timeout **)&prev->next = curr->next;
364
365 if (tmo_list == NULL)
366 tick_remove_task(timeout_tick); /* last one - remove task */
367 }
368 /* not in list or tmo == NULL */
369 }
370
371 set_irq_level(oldlevel);
372}
373
374/* Adds a timeout callback - calling with an active timeout resets the
375 interval - can be called from the ISR */
376void timeout_register(struct timeout *tmo, timeout_cb_type callback,
377 int ticks, intptr_t data)
378{
379 int oldlevel;
380 struct timeout *curr;
381
382 if (tmo == NULL)
383 return;
384
385 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
386
387 /* see if this one is already registered */
388 curr = tmo_list;
389 while (curr != tmo && curr != NULL)
390 curr = (struct timeout *)curr->next;
391
392 if (curr == NULL)
393 {
394 /* not found - add it */
395 if (tmo_list == NULL)
396 tick_add_task(timeout_tick); /* first one - add task */
397
398 *(struct timeout **)&tmo->next = tmo_list;
399 tmo_list = tmo;
400 }
401
402 tmo->callback = callback;
403 tmo->ticks = ticks;
404 tmo->data = data;
405 *(long *)&tmo->expires = current_tick + ticks;
406
407 set_irq_level(oldlevel);
408}
409
410#endif /* INCLUDE_TIMEOUT_API */
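
Putting the timeout API together: timeout_register() arms (or re-arms) a tick-driven one-shot and timeout_tick() re-arms it whenever the callback returns nonzero, as the code above shows. A hedged usage sketch, assuming timeout_cb_type is the int-returning callback taking a struct timeout pointer that timeout_tick() implies; the names and the 5 second interval are illustrative:

    static struct timeout my_tmo;                /* hypothetical timeout object */

    static int my_timeout_cb(struct timeout *tmo)
    {
        (void)tmo;                               /* tmo->data holds the value passed at register time */
        /* ...deferred work goes here... */
        return 0;                                /* 0 = stay one-shot; nonzero re-arms for tmo->ticks */
    }

    static void my_arm(void)
    {
        /* arm (or reset) a 5 second one-shot - callable from an ISR */
        timeout_register(&my_tmo, my_timeout_cb, 5*HZ, 0);
    }

    static void my_disarm(void)
    {
        timeout_cancel(&my_tmo);                 /* cancel early if no longer wanted */
    }
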
411
412/****************************************************************************
413 * Thread stuff
414 ****************************************************************************/
80void sleep(int ticks) 415void sleep(int ticks)
81{ 416{
82#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER) 417#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
@@ -96,9 +431,11 @@ void sleep(int ticks)
96#elif defined(CPU_PP) && defined(BOOTLOADER) 431#elif defined(CPU_PP) && defined(BOOTLOADER)
97 unsigned stop = USEC_TIMER + ticks * (1000000/HZ); 432 unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
98 while (TIME_BEFORE(USEC_TIMER, stop)) 433 while (TIME_BEFORE(USEC_TIMER, stop))
99 switch_thread(NULL); 434 switch_thread();
100#else 435#else
436 set_irq_level(HIGHEST_IRQ_LEVEL);
101 sleep_thread(ticks); 437 sleep_thread(ticks);
438 switch_thread();
102#endif 439#endif
103} 440}
104 441
@@ -107,7 +444,7 @@ void yield(void)
107#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER)) 444#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
108 /* Some targets don't like yielding in the bootloader */ 445 /* Some targets don't like yielding in the bootloader */
109#else 446#else
110 switch_thread(NULL); 447 switch_thread();
111#endif 448#endif
112} 449}
113 450
@@ -116,43 +453,50 @@ void yield(void)
116 ****************************************************************************/ 453 ****************************************************************************/
117 454
118#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 455#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
119/* Moves waiting thread's descriptor to the current sender when a 456/****************************************************************************
120 message is dequeued */ 457 * Sender thread queue structure that aids implementation of priority
121static void queue_fetch_sender(struct queue_sender_list *send, 458 * inheritance on queues because the send list structure is the same as
122 unsigned int i) 459 * for all other kernel objects:
123{ 460 *
124 struct thread_entry **spp = &send->senders[i]; 461 * Example state:
125 462 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
126 if(*spp) 463 * E3 was posted with queue_post
127 { 464 * 4 events remain enqueued (E1-E4)
128 send->curr_sender = *spp; 465 *
129 *spp = NULL; 466 * rd wr
130 } 467 * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
131} 468 * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
469 * \/ \/ \/
470 * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
471 * q->send->curr_sender: /\
472 *
473 * Thread has E0 in its own struct queue_event.
474 *
475 ****************************************************************************/
132 476
133/* Puts the specified return value in the waiting thread's return value 477/* Puts the specified return value in the waiting thread's return value
134 * and wakes the thread. 478 * and wakes the thread.
135 * 1) A sender should be confirmed to exist before calling which makes it 479 *
136 * more efficient to reject the majority of cases that don't need this 480 * A sender should be confirmed to exist before calling which makes it
137 called. 481 * more efficient to reject the majority of cases that don't need this
138 * 2) Requires interrupts disabled since queue overflows can cause posts 482 * called.
139 * from interrupt handlers to wake threads. Not doing so could cause
140 * an attempt at multiple wakes or other problems.
141 */ 483 */
142static void queue_release_sender(struct thread_entry **sender, 484static void queue_release_sender(struct thread_entry **sender,
143 intptr_t retval) 485 intptr_t retval)
144{ 486{
145 (*sender)->retval = retval; 487 struct thread_entry *thread = *sender;
146 wakeup_thread_no_listlock(sender); 488
147 /* This should _never_ happen - there must never be multiple 489 *sender = NULL; /* Clear slot. */
148 threads in this list and it is a corrupt state */ 490 thread->wakeup_ext_cb = NULL; /* Clear callback. */
149 KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender); 491 thread->retval = retval; /* Assign thread-local return value. */
492 *thread->bqp = thread; /* Move blocking queue head to thread since
493 wakeup_thread wakes the first thread in
494 the list. */
495 wakeup_thread(thread->bqp);
150} 496}
151 497
152/* Releases any waiting threads that are queued with queue_send - 498/* Releases any waiting threads that are queued with queue_send -
153 * reply with 0. 499 * reply with 0.
154 * Disable IRQs and lock before calling since it uses
155 * queue_release_sender.
156 */ 500 */
157static void queue_release_all_senders(struct event_queue *q) 501static void queue_release_all_senders(struct event_queue *q)
158{ 502{
@@ -172,25 +516,103 @@ static void queue_release_all_senders(struct event_queue *q)
172 } 516 }
173} 517}
174 518
519/* Callback to do extra forced removal steps from sender list in addition
520 * to the normal blocking queue removal and priority dis-inherit */
521static void queue_remove_sender_thread_cb(struct thread_entry *thread)
522{
523 *((struct thread_entry **)thread->retval) = NULL;
524 thread->wakeup_ext_cb = NULL;
525 thread->retval = 0;
526}
527
175/* Enables queue_send on the specified queue - caller allocates the extra 528/* Enables queue_send on the specified queue - caller allocates the extra
176 data structure. Only queues which are taken to be owned by a thread should 529 * data structure. Only queues which are taken to be owned by a thread should
177 enable this. Public waiting is not permitted. */ 530 * enable this; however, an official owner is not compulsory but must be
531 * specified for priority inheritance to operate.
532 *
533 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
534 * messages results in an undefined order of message replies.
535 */
178void queue_enable_queue_send(struct event_queue *q, 536void queue_enable_queue_send(struct event_queue *q,
179 struct queue_sender_list *send) 537 struct queue_sender_list *send,
538 struct thread_entry *owner)
180{ 539{
181 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 540 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
182 corelock_lock(&q->cl); 541 corelock_lock(&q->cl);
183 542
184 q->send = NULL; 543 if(send != NULL && q->send == NULL)
185 if(send != NULL)
186 { 544 {
187 memset(send, 0, sizeof(*send)); 545 memset(send, 0, sizeof(*send));
546#ifdef HAVE_PRIORITY_SCHEDULING
547 send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
548 send->blocker.priority = PRIORITY_IDLE;
549 send->blocker.thread = owner;
550 if(owner != NULL)
551 q->blocker_p = &send->blocker;
552#endif
188 q->send = send; 553 q->send = send;
189 } 554 }
190 555
191 corelock_unlock(&q->cl); 556 corelock_unlock(&q->cl);
192 set_irq_level(oldlevel); 557 set_irq_level(oldlevel);
558
559 (void)owner;
193} 560}
561
562/* Unblock a blocked thread at a given event index */
563static inline void queue_do_unblock_sender(struct queue_sender_list *send,
564 unsigned int i)
565{
566 if(send)
567 {
568 struct thread_entry **spp = &send->senders[i];
569
570 if(*spp)
571 {
572 queue_release_sender(spp, 0);
573 }
574 }
575}
576
577/* Perform the auto-reply sequence */
578static inline void queue_do_auto_reply(struct queue_sender_list *send)
579{
580 if(send && send->curr_sender)
581 {
582 /* auto-reply */
583 queue_release_sender(&send->curr_sender, 0);
584 }
585}
586
587/* Moves the waiting thread's reference from the senders array to the
588 * current_sender, which represents the thread waiting for a response to the
589 * last message removed from the queue. This also protects the thread from
590 * being bumped due to overflow which would not be a valid action since its
591 * message _is_ being processed at this point. */
592static inline void queue_do_fetch_sender(struct queue_sender_list *send,
593 unsigned int rd)
594{
595 if(send)
596 {
597 struct thread_entry **spp = &send->senders[rd];
598
599 if(*spp)
600 {
601 /* Move thread reference from array to the next thread
602 that queue_reply will release */
603 send->curr_sender = *spp;
604 (*spp)->retval = (intptr_t)spp;
605 *spp = NULL;
606 }
607 /* else message was posted asynchronously with queue_post */
608 }
609}
610#else
611/* Empty macros for when synchronous sending is not enabled */
612#define queue_release_all_senders(q)
613#define queue_do_unblock_sender(send, i)
614#define queue_do_auto_reply(send)
615#define queue_do_fetch_sender(send, rd)
194#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ 616#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
195 617
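
For context on how these helpers combine at their call sites, here is a hedged sketch of the synchronous-send pattern with the new three-argument queue_enable_queue_send(); the queue, event ID, thread and helper names are invented, and the owner argument only has an effect when HAVE_PRIORITY_SCHEDULING is defined:

    static struct event_queue my_q;              /* hypothetical queue and sender list */
    static struct queue_sender_list my_q_send;

    #define MY_Q_DO_WORK 1                       /* hypothetical event ID */

    static void my_owner_thread(void)            /* thread entry; name illustrative */
    {
        struct queue_event ev;

        queue_init(&my_q, true);
        queue_enable_queue_send(&my_q, &my_q_send, thread_get_current());

        for (;;)
        {
            queue_wait(&my_q, &ev);              /* auto-replies 0 to any still-unanswered sender */
            if (ev.id == MY_Q_DO_WORK)
                queue_reply(&my_q, ev.data + 1); /* explicit reply to the blocked sender */
        }
    }

    static intptr_t my_client_call(void)
    {
        /* blocks until the owner replies (or auto-reply releases it with 0) */
        return queue_send(&my_q, MY_Q_DO_WORK, 41);
    }
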
196/* Queue must not be available for use during this call */ 618/* Queue must not be available for use during this call */
@@ -204,11 +626,12 @@ void queue_init(struct event_queue *q, bool register_queue)
204 } 626 }
205 627
206 corelock_init(&q->cl); 628 corelock_init(&q->cl);
207 thread_queue_init(&q->queue); 629 q->queue = NULL;
208 q->read = 0; 630 q->read = 0;
209 q->write = 0; 631 q->write = 0;
210#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 632#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
211 q->send = NULL; /* No message sending by default */ 633 q->send = NULL; /* No message sending by default */
634 IF_PRIO( q->blocker_p = NULL; )
212#endif 635#endif
213 636
214 if(register_queue) 637 if(register_queue)
@@ -254,14 +677,20 @@ void queue_delete(struct event_queue *q)
254 677
255 corelock_unlock(&all_queues.cl); 678 corelock_unlock(&all_queues.cl);
256 679
257 /* Release threads waiting on queue head */ 680 /* Release thread(s) waiting on queue head */
258 thread_queue_wake(&q->queue); 681 thread_queue_wake(&q->queue);
259 682
260#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 683#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
261 /* Release waiting threads for reply and reply to any dequeued 684 if(q->send)
262 message waiting for one. */ 685 {
263 queue_release_all_senders(q); 686 /* Release threads waiting for replies */
264 queue_reply(q, 0); 687 queue_release_all_senders(q);
688
689 /* Reply to any dequeued message waiting for one */
690 queue_do_auto_reply(q->send);
691
692 q->send = NULL;
693 }
265#endif 694#endif
266 695
267 q->read = 0; 696 q->read = 0;
@@ -279,33 +708,32 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
279 int oldlevel; 708 int oldlevel;
280 unsigned int rd; 709 unsigned int rd;
281 710
711#ifdef HAVE_PRIORITY_SCHEDULING
712 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
713 QUEUE_GET_THREAD(q) == thread_get_current(),
714 "queue_wait->wrong thread\n");
715#endif
716
282 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 717 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
283 corelock_lock(&q->cl); 718 corelock_lock(&q->cl);
284 719
285#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 720 /* auto-reply */
286 if(q->send && q->send->curr_sender) 721 queue_do_auto_reply(q->send);
287 {
288 /* auto-reply */
289 queue_release_sender(&q->send->curr_sender, 0);
290 }
291#endif
292 722
293 if (q->read == q->write) 723 if (q->read == q->write)
294 { 724 {
725 struct thread_entry *current = cores[CURRENT_CORE].running;
726
295 do 727 do
296 { 728 {
297#if CONFIG_CORELOCK == CORELOCK_NONE 729 IF_COP( current->obj_cl = &q->cl; )
298#elif CONFIG_CORELOCK == SW_CORELOCK 730 current->bqp = &q->queue;
299 const unsigned int core = CURRENT_CORE; 731
300 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 732 block_thread(current);
301 cores[core].blk_ops.cl_p = &q->cl; 733
302#elif CONFIG_CORELOCK == CORELOCK_SWAP 734 corelock_unlock(&q->cl);
303 const unsigned int core = CURRENT_CORE; 735 switch_thread();
304 cores[core].blk_ops.flags = TBOP_SET_VARu8; 736
305 cores[core].blk_ops.var_u8p = &q->cl.locked;
306 cores[core].blk_ops.var_u8v = 0;
307#endif /* CONFIG_CORELOCK */
308 block_thread(&q->queue);
309 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 737 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
310 corelock_lock(&q->cl); 738 corelock_lock(&q->cl);
311 } 739 }
@@ -316,13 +744,8 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
316 rd = q->read++ & QUEUE_LENGTH_MASK; 744 rd = q->read++ & QUEUE_LENGTH_MASK;
317 *ev = q->events[rd]; 745 *ev = q->events[rd];
318 746
319#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 747 /* Get data for a waiting thread if one */
320 if(q->send && q->send->senders[rd]) 748 queue_do_fetch_sender(q->send, rd);
321 {
322 /* Get data for a waiting thread if one */
323 queue_fetch_sender(q->send, rd);
324 }
325#endif
326 749
327 corelock_unlock(&q->cl); 750 corelock_unlock(&q->cl);
328 set_irq_level(oldlevel); 751 set_irq_level(oldlevel);
@@ -332,31 +755,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
332{ 755{
333 int oldlevel; 756 int oldlevel;
334 757
758#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
759 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
760 QUEUE_GET_THREAD(q) == thread_get_current(),
761 "queue_wait_w_tmo->wrong thread\n");
762#endif
763
335 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 764 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
336 corelock_lock(&q->cl); 765 corelock_lock(&q->cl);
337 766
338#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 767 /* Auto-reply */
339 if (q->send && q->send->curr_sender) 768 queue_do_auto_reply(q->send);
340 {
341 /* auto-reply */
342 queue_release_sender(&q->send->curr_sender, 0);
343 }
344#endif
345 769
346 if (q->read == q->write && ticks > 0) 770 if (q->read == q->write && ticks > 0)
347 { 771 {
348#if CONFIG_CORELOCK == CORELOCK_NONE 772 struct thread_entry *current = cores[CURRENT_CORE].running;
349#elif CONFIG_CORELOCK == SW_CORELOCK 773
350 const unsigned int core = CURRENT_CORE; 774 IF_COP( current->obj_cl = &q->cl; )
351 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 775 current->bqp = &q->queue;
352 cores[core].blk_ops.cl_p = &q->cl; 776
353#elif CONFIG_CORELOCK == CORELOCK_SWAP 777 block_thread_w_tmo(current, ticks);
354 const unsigned int core = CURRENT_CORE; 778 corelock_unlock(&q->cl);
355 cores[core].blk_ops.flags = TBOP_SET_VARu8; 779
356 cores[core].blk_ops.var_u8p = &q->cl.locked; 780 switch_thread();
357 cores[core].blk_ops.var_u8v = 0; 781
358#endif
359 block_thread_w_tmo(&q->queue, ticks);
360 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 782 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
361 corelock_lock(&q->cl); 783 corelock_lock(&q->cl);
362 } 784 }
@@ -367,14 +789,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
367 { 789 {
368 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK; 790 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
369 *ev = q->events[rd]; 791 *ev = q->events[rd];
370 792 /* Get data for a waiting thread if one */
371#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 793 queue_do_fetch_sender(q->send, rd);
372 if(q->send && q->send->senders[rd])
373 {
374 /* Get data for a waiting thread if one */
375 queue_fetch_sender(q->send, rd);
376 }
377#endif
378 } 794 }
379 else 795 else
380 { 796 {
@@ -398,18 +814,8 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
398 q->events[wr].id = id; 814 q->events[wr].id = id;
399 q->events[wr].data = data; 815 q->events[wr].data = data;
400 816
401#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 817 /* overflow protect - unblock any thread waiting at this index */
402 if(q->send) 818 queue_do_unblock_sender(q->send, wr);
403 {
404 struct thread_entry **spp = &q->send->senders[wr];
405
406 if (*spp)
407 {
408 /* overflow protect - unblock any thread waiting at this index */
409 queue_release_sender(spp, 0);
410 }
411 }
412#endif
413 819
414 /* Wakeup a waiting thread if any */ 820 /* Wakeup a waiting thread if any */
415 wakeup_thread(&q->queue); 821 wakeup_thread(&q->queue);
@@ -436,8 +842,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
436 842
437 if(q->send) 843 if(q->send)
438 { 844 {
439 const unsigned int core = CURRENT_CORE; 845 struct queue_sender_list *send = q->send;
440 struct thread_entry **spp = &q->send->senders[wr]; 846 struct thread_entry **spp = &send->senders[wr];
847 struct thread_entry *current = cores[CURRENT_CORE].running;
441 848
442 if(*spp) 849 if(*spp)
443 { 850 {
@@ -448,17 +855,20 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
448 /* Wakeup a waiting thread if any */ 855 /* Wakeup a waiting thread if any */
449 wakeup_thread(&q->queue); 856 wakeup_thread(&q->queue);
450 857
451#if CONFIG_CORELOCK == CORELOCK_NONE 858 /* Save thread in slot, add to list and wait for reply */
452#elif CONFIG_CORELOCK == SW_CORELOCK 859 *spp = current;
453 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 860 IF_COP( current->obj_cl = &q->cl; )
454 cores[core].blk_ops.cl_p = &q->cl; 861 IF_PRIO( current->blocker = q->blocker_p; )
455#elif CONFIG_CORELOCK == CORELOCK_SWAP 862 current->wakeup_ext_cb = queue_remove_sender_thread_cb;
456 cores[core].blk_ops.flags = TBOP_SET_VARu8; 863 current->retval = (intptr_t)spp;
457 cores[core].blk_ops.var_u8p = &q->cl.locked; 864 current->bqp = &send->list;
458 cores[core].blk_ops.var_u8v = 0; 865
459#endif 866 block_thread(current);
460 block_thread_no_listlock(spp); 867
461 return cores[core].running->retval; 868 corelock_unlock(&q->cl);
869 switch_thread();
870
871 return current->retval;
462 } 872 }
463 873
464 /* Function as queue_post if sending is not enabled */ 874 /* Function as queue_post if sending is not enabled */
@@ -497,37 +907,22 @@ void queue_reply(struct event_queue *q, intptr_t retval)
497{ 907{
498 if(q->send && q->send->curr_sender) 908 if(q->send && q->send->curr_sender)
499 { 909 {
500#if NUM_CORES > 1
501 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 910 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
502 corelock_lock(&q->cl); 911 corelock_lock(&q->cl);
503 /* Double-check locking */ 912 /* Double-check locking */
504 if(q->send && q->send->curr_sender) 913 IF_COP( if(q->send && q->send->curr_sender) )
505 { 914 {
506#endif
507
508 queue_release_sender(&q->send->curr_sender, retval); 915 queue_release_sender(&q->send->curr_sender, retval);
509
510#if NUM_CORES > 1
511 } 916 }
917
512 corelock_unlock(&q->cl); 918 corelock_unlock(&q->cl);
513 set_irq_level(oldlevel); 919 set_irq_level(oldlevel);
514#endif
515 } 920 }
516} 921}
517#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
518
519/* Poll queue to see if a message exists - careful in using the result if
520 * queue_remove_from_head is called when messages are posted - possibly use
521 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
522 * unsignals the queue may cause an unwanted block */
523bool queue_empty(const struct event_queue* q)
524{
525 return ( q->read == q->write );
526}
527 922
528bool queue_peek(struct event_queue *q, struct queue_event *ev) 923bool queue_peek(struct event_queue *q, struct queue_event *ev)
529{ 924{
530 if (q->read == q->write) 925 if(q->read == q->write)
531 return false; 926 return false;
532 927
533 bool have_msg = false; 928 bool have_msg = false;
@@ -535,7 +930,7 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
535 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 930 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
536 corelock_lock(&q->cl); 931 corelock_lock(&q->cl);
537 932
538 if (q->read != q->write) 933 if(q->read != q->write)
539 { 934 {
540 *ev = q->events[q->read & QUEUE_LENGTH_MASK]; 935 *ev = q->events[q->read & QUEUE_LENGTH_MASK];
541 have_msg = true; 936 have_msg = true;
@@ -546,6 +941,16 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
546 941
547 return have_msg; 942 return have_msg;
548} 943}
944#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
945
946/* Poll queue to see if a message exists - careful in using the result if
947 * queue_remove_from_head is called when messages are posted - possibly use
948 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
949 * unsignals the queue may cause an unwanted block */
950bool queue_empty(const struct event_queue* q)
951{
952 return ( q->read == q->write );
953}
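
As the comment above queue_empty() warns, polling and then waiting can race with queue_remove_from_head(); a hedged illustration of the suggested alternative, reusing the hypothetical my_q from the earlier sketch and assuming the timeout path of queue_wait_w_tmo() fills in SYS_TIMEOUT as elsewhere in the Rockbox queue API (the handler name is invented):

    static void my_drain_pending(void)           /* hypothetical helper */
    {
        struct queue_event ev;

        /* rather than: if(!queue_empty(&my_q)) queue_wait(&my_q, &ev); */
        queue_wait_w_tmo(&my_q, &ev, 0);         /* returns at once when nothing is queued */
        if (ev.id != SYS_TIMEOUT)
            handle_event(&ev);                   /* hypothetical handler */
    }
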
549 954
550void queue_clear(struct event_queue* q) 955void queue_clear(struct event_queue* q)
551{ 956{
@@ -554,11 +959,9 @@ void queue_clear(struct event_queue* q)
554 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 959 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
555 corelock_lock(&q->cl); 960 corelock_lock(&q->cl);
556 961
557#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
558 /* Release all threads waiting in the queue for a reply - 962 /* Release all threads waiting in the queue for a reply -
559 dequeued sent message will be handled by owning thread */ 963 dequeued sent message will be handled by owning thread */
560 queue_release_all_senders(q); 964 queue_release_all_senders(q);
561#endif
562 965
563 q->read = 0; 966 q->read = 0;
564 q->write = 0; 967 q->write = 0;
@@ -583,18 +986,9 @@ void queue_remove_from_head(struct event_queue *q, long id)
583 break; 986 break;
584 } 987 }
585 988
586#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 989 /* Release any thread waiting on this message */
587 if(q->send) 990 queue_do_unblock_sender(q->send, rd);
588 {
589 struct thread_entry **spp = &q->send->senders[rd];
590 991
591 if (*spp)
592 {
593 /* Release any thread waiting on this message */
594 queue_release_sender(spp, 0);
595 }
596 }
597#endif
598 q->read++; 992 q->read++;
599 } 993 }
600 994
@@ -636,397 +1030,72 @@ int queue_broadcast(long id, intptr_t data)
636} 1030}
637 1031
638/**************************************************************************** 1032/****************************************************************************
639 * Timer tick
640 ****************************************************************************/
641#if CONFIG_CPU == SH7034
642void tick_start(unsigned int interval_in_ms)
643{
644 unsigned long count;
645
646 count = CPU_FREQ * interval_in_ms / 1000 / 8;
647
648 if(count > 0x10000)
649 {
650 panicf("Error! The tick interval is too long (%d ms)\n",
651 interval_in_ms);
652 return;
653 }
654
655 /* We are using timer 0 */
656
657 TSTR &= ~0x01; /* Stop the timer */
658 TSNC &= ~0x01; /* No synchronization */
659 TMDR &= ~0x01; /* Operate normally */
660
661 TCNT0 = 0; /* Start counting at 0 */
662 GRA0 = (unsigned short)(count - 1);
663 TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
664
665 /* Enable interrupt on level 1 */
666 IPRC = (IPRC & ~0x00f0) | 0x0010;
667
668 TSR0 &= ~0x01;
669 TIER0 = 0xf9; /* Enable GRA match interrupt */
670
671 TSTR |= 0x01; /* Start timer 1 */
672}
673
674void IMIA0(void) __attribute__ ((interrupt_handler));
675void IMIA0(void)
676{
677 int i;
678
679 /* Run through the list of tick tasks */
680 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
681 {
682 if(tick_funcs[i])
683 {
684 tick_funcs[i]();
685 }
686 }
687
688 current_tick++;
689
690 TSR0 &= ~0x01;
691}
692#elif defined(CPU_COLDFIRE)
693void tick_start(unsigned int interval_in_ms)
694{
695 unsigned long count;
696 int prescale;
697
698 count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
699
700 if(count > 0x10000)
701 {
702 panicf("Error! The tick interval is too long (%d ms)\n",
703 interval_in_ms);
704 return;
705 }
706
707 prescale = cpu_frequency / CPU_FREQ;
708 /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
709 changes within timer.c */
710
711 /* We are using timer 0 */
712
713 TRR0 = (unsigned short)(count - 1); /* The reference count */
714 TCN0 = 0; /* reset the timer */
715 TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
716 /* restart, CLK/16, enabled, prescaler */
717
718 TER0 = 0xff; /* Clear all events */
719
720 ICR1 = 0x8c; /* Interrupt on level 3.0 */
721 IMR &= ~0x200;
722}
723
724void TIMER0(void) __attribute__ ((interrupt_handler));
725void TIMER0(void)
726{
727 int i;
728
729 /* Run through the list of tick tasks */
730 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
731 {
732 if(tick_funcs[i])
733 {
734 tick_funcs[i]();
735 }
736 }
737
738 current_tick++;
739
740 TER0 = 0xff; /* Clear all events */
741}
742
743#elif defined(CPU_PP)
744
745#ifndef BOOTLOADER
746void TIMER1(void)
747{
748 int i;
749
750 /* Run through the list of tick tasks (using main core) */
751 TIMER1_VAL; /* Read value to ack IRQ */
752
753 /* Run through the list of tick tasks using main CPU core -
754 wake up the COP through its control interface to provide pulse */
755 for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
756 {
757 if (tick_funcs[i])
758 {
759 tick_funcs[i]();
760 }
761 }
762
763#if NUM_CORES > 1
764 /* Pulse the COP */
765 core_wake(COP);
766#endif /* NUM_CORES */
767
768 current_tick++;
769}
770#endif
771
772/* Must be last function called init kernel/thread initialization */
773void tick_start(unsigned int interval_in_ms)
774{
775#ifndef BOOTLOADER
776 TIMER1_CFG = 0x0;
777 TIMER1_VAL;
778 /* enable timer */
779 TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
780 /* unmask interrupt source */
781 CPU_INT_EN = TIMER1_MASK;
782#else
783 /* We don't enable interrupts in the bootloader */
784 (void)interval_in_ms;
785#endif
786}
787
788#elif CONFIG_CPU == PNX0101
789
790void timer_handler(void)
791{
792 int i;
793
794 /* Run through the list of tick tasks */
795 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
796 {
797 if(tick_funcs[i])
798 tick_funcs[i]();
799 }
800
801 current_tick++;
802
803 TIMER0.clr = 0;
804}
805
806void tick_start(unsigned int interval_in_ms)
807{
808 TIMER0.ctrl &= ~0x80; /* Disable the counter */
809 TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
810 TIMER0.load = 3000000 * interval_in_ms / 1000;
811 TIMER0.ctrl &= ~0xc; /* No prescaler */
812 TIMER0.clr = 1; /* Clear the interrupt request */
813
814 irq_set_int_handler(IRQ_TIMER0, timer_handler);
815 irq_enable_int(IRQ_TIMER0);
816
817 TIMER0.ctrl |= 0x80; /* Enable the counter */
818}
819#endif
820
821int tick_add_task(void (*f)(void))
822{
823 int i;
824 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
825
826 /* Add a task if there is room */
827 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
828 {
829 if(tick_funcs[i] == NULL)
830 {
831 tick_funcs[i] = f;
832 set_irq_level(oldlevel);
833 return 0;
834 }
835 }
836 set_irq_level(oldlevel);
837 panicf("Error! tick_add_task(): out of tasks");
838 return -1;
839}
840
841int tick_remove_task(void (*f)(void))
842{
843 int i;
844 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
845
846 /* Remove a task if it is there */
847 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
848 {
849 if(tick_funcs[i] == f)
850 {
851 tick_funcs[i] = NULL;
852 set_irq_level(oldlevel);
853 return 0;
854 }
855 }
856
857 set_irq_level(oldlevel);
858 return -1;
859}
860
861/****************************************************************************
862 * Tick-based interval timers/one-shots - be mindful this is not really
863 * intended for continuous timers but for events that need to run for a short
864 * time and be cancelled without further software intervention.
865 ****************************************************************************/
866#ifdef INCLUDE_TIMEOUT_API
867static struct timeout *tmo_list = NULL; /* list of active timeout events */
868
869/* timeout tick task - calls event handlers when they expire
870 * Event handlers may alter ticks, callback and data during operation.
871 */
872static void timeout_tick(void)
873{
874 unsigned long tick = current_tick;
875 struct timeout *curr, *next;
876
877 for (curr = tmo_list; curr != NULL; curr = next)
878 {
879 next = (struct timeout *)curr->next;
880
881 if (TIME_BEFORE(tick, curr->expires))
882 continue;
883
884 /* this event has expired - call callback */
885 if (curr->callback(curr))
886 *(long *)&curr->expires = tick + curr->ticks; /* reload */
887 else
888 timeout_cancel(curr); /* cancel */
889 }
890}
891
892/* Cancels a timeout callback - can be called from the ISR */
893void timeout_cancel(struct timeout *tmo)
894{
895 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
896
897 if (tmo_list != NULL)
898 {
899 struct timeout *curr = tmo_list;
900 struct timeout *prev = NULL;
901
902 while (curr != tmo && curr != NULL)
903 {
904 prev = curr;
905 curr = (struct timeout *)curr->next;
906 }
907
908 if (curr != NULL)
909 {
910 /* in list */
911 if (prev == NULL)
912 tmo_list = (struct timeout *)curr->next;
913 else
914 *(const struct timeout **)&prev->next = curr->next;
915
916 if (tmo_list == NULL)
917 tick_remove_task(timeout_tick); /* last one - remove task */
918 }
919 /* not in list or tmo == NULL */
920 }
921
922 set_irq_level(oldlevel);
923}
924
925/* Adds a timeout callback - calling with an active timeout resets the
926 interval - can be called from the ISR */
927void timeout_register(struct timeout *tmo, timeout_cb_type callback,
928 int ticks, intptr_t data)
929{
930 int oldlevel;
931 struct timeout *curr;
932
933 if (tmo == NULL)
934 return;
935
936 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
937
938 /* see if this one is already registered */
939 curr = tmo_list;
940 while (curr != tmo && curr != NULL)
941 curr = (struct timeout *)curr->next;
942
943 if (curr == NULL)
944 {
945 /* not found - add it */
946 if (tmo_list == NULL)
947 tick_add_task(timeout_tick); /* first one - add task */
948
949 *(struct timeout **)&tmo->next = tmo_list;
950 tmo_list = tmo;
951 }
952
953 tmo->callback = callback;
954 tmo->ticks = ticks;
955 tmo->data = data;
956 *(long *)&tmo->expires = current_tick + ticks;
957
958 set_irq_level(oldlevel);
959}
960
961#endif /* INCLUDE_TIMEOUT_API */
962
963/****************************************************************************
964 * Simple mutex functions ;) 1033 * Simple mutex functions ;)
965 ****************************************************************************/ 1034 ****************************************************************************/
1035
1036/* Initialize a mutex object - call before any use and do not call again once
1037 * the object is available to other threads */
966void mutex_init(struct mutex *m) 1038void mutex_init(struct mutex *m)
967{ 1039{
1040 corelock_init(&m->cl);
968 m->queue = NULL; 1041 m->queue = NULL;
969 m->thread = NULL;
970 m->count = 0; 1042 m->count = 0;
971 m->locked = 0; 1043 m->locked = 0;
972#if CONFIG_CORELOCK == SW_CORELOCK 1044 MUTEX_SET_THREAD(m, NULL);
973 corelock_init(&m->cl); 1045#ifdef HAVE_PRIORITY_SCHEDULING
1046 m->blocker.priority = PRIORITY_IDLE;
1047 m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
1048 m->no_preempt = false;
974#endif 1049#endif
975} 1050}
976 1051
1052/* Gain ownership of a mutex object or block until it becomes free */
977void mutex_lock(struct mutex *m) 1053void mutex_lock(struct mutex *m)
978{ 1054{
979 const unsigned int core = CURRENT_CORE; 1055 const unsigned int core = CURRENT_CORE;
980 struct thread_entry *const thread = cores[core].running; 1056 struct thread_entry *current = cores[core].running;
981 1057
982 if(thread == m->thread) 1058 if(current == MUTEX_GET_THREAD(m))
983 { 1059 {
1060 /* current thread already owns this mutex */
984 m->count++; 1061 m->count++;
985 return; 1062 return;
986 } 1063 }
987 1064
988 /* Repeat some stuff here or else all the variation is too difficult to 1065 /* lock out other cores */
989 read */
990#if CONFIG_CORELOCK == CORELOCK_SWAP
991 /* peek at lock until it's no longer busy */
992 unsigned int locked;
993 while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
994 if(locked == 0)
995 {
996 m->thread = thread;
997 m->locked = 1;
998 return;
999 }
1000
1001 /* Block until the lock is open... */
1002 cores[core].blk_ops.flags = TBOP_SET_VARu8;
1003 cores[core].blk_ops.var_u8p = &m->locked;
1004 cores[core].blk_ops.var_u8v = 1;
1005#else
1006 corelock_lock(&m->cl); 1066 corelock_lock(&m->cl);
1007 if (m->locked == 0) 1067
1068 if(m->locked == 0)
1008 { 1069 {
1070 /* lock is open */
1071 MUTEX_SET_THREAD(m, current);
1009 m->locked = 1; 1072 m->locked = 1;
1010 m->thread = thread;
1011 corelock_unlock(&m->cl); 1073 corelock_unlock(&m->cl);
1012 return; 1074 return;
1013 } 1075 }
1014 1076
1015 /* Block until the lock is open... */ 1077 /* block until the lock is open... */
1016#if CONFIG_CORELOCK == SW_CORELOCK 1078 IF_COP( current->obj_cl = &m->cl; )
1017 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1079 IF_PRIO( current->blocker = &m->blocker; )
1018 cores[core].blk_ops.cl_p = &m->cl; 1080 current->bqp = &m->queue;
1019#endif 1081
1020#endif /* CONFIG_CORELOCK */ 1082 set_irq_level(HIGHEST_IRQ_LEVEL);
1083 block_thread(current);
1021 1084
1022 block_thread_no_listlock(&m->queue); 1085 corelock_unlock(&m->cl);
1086
1087 /* ...and turn control over to next thread */
1088 switch_thread();
1023} 1089}
1024 1090
1091/* Release ownership of a mutex object - only the owning thread may call this */
1025void mutex_unlock(struct mutex *m) 1092void mutex_unlock(struct mutex *m)
1026{ 1093{
1027 /* unlocker not being the owner is an unlocking violation */ 1094 /* unlocker not being the owner is an unlocking violation */
1028 KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running, 1095 KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
1029 "mutex_unlock->wrong thread (recurse)"); 1096 "mutex_unlock->wrong thread (%s != %s)\n",
1097 MUTEX_GET_THREAD(m)->name,
1098 thread_get_current()->name);
1030 1099
1031 if(m->count > 0) 1100 if(m->count > 0)
1032 { 1101 {
@@ -1035,37 +1104,33 @@ void mutex_unlock(struct mutex *m)
1035 return; 1104 return;
1036 } 1105 }
1037 1106
1038#if CONFIG_CORELOCK == SW_CORELOCK
1039 /* lock out other cores */ 1107 /* lock out other cores */
1040 corelock_lock(&m->cl); 1108 corelock_lock(&m->cl);
1041#elif CONFIG_CORELOCK == CORELOCK_SWAP
1042 /* wait for peeker to move on */
1043 while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
1044#endif
1045 1109
1046 /* transfer to next queued thread if any */ 1110 /* transfer to next queued thread if any */
1047 1111 if(m->queue == NULL)
1048 /* This can become busy using SWP but is safe since only one thread
1049 will be changing things at a time. Allowing timeout waits will
1050 change that however but not now. There is also a hazard the thread
1051 could be killed before performing the wakeup but that's just
1052 irresponsible. :-) */
1053 m->thread = m->queue;
1054
1055 if(m->thread == NULL)
1056 { 1112 {
1057 m->locked = 0; /* release lock */ 1113 /* no threads waiting - open the lock */
1058#if CONFIG_CORELOCK == SW_CORELOCK 1114 MUTEX_SET_THREAD(m, NULL);
1115 m->locked = 0;
1059 corelock_unlock(&m->cl); 1116 corelock_unlock(&m->cl);
1060#endif 1117 return;
1061 } 1118 }
1062 else /* another thread is waiting - remain locked */ 1119 else
1063 { 1120 {
1064 wakeup_thread_no_listlock(&m->queue); 1121 const int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1065#if CONFIG_CORELOCK == SW_CORELOCK 1122 /* Transfer of the owning thread is handled in the wakeup protocol
1066 corelock_unlock(&m->cl); 1123 * if priorities are enabled; otherwise just set it from the
1124 * queue head. */
1125 IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
1126 IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
1127 set_irq_level(oldlevel);
1128
1066 corelock_unlock(&m->cl); 1129 corelock_unlock(&m->cl);
1067#elif CONFIG_CORELOCK == CORELOCK_SWAP 1130
1068 m->locked = 1; 1131#ifdef HAVE_PRIORITY_SCHEDULING
1132 if((result & THREAD_SWITCH) && !m->no_preempt)
1133 switch_thread();
1069#endif 1134#endif
1070 } 1135 }
1071} 1136}
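
A hedged usage sketch for the mutex API above (the object and function names are illustrative): mutex_lock() blocks until the lock is free, nests via m->count when the owner re-locks, and with priority scheduling the blocker structure lets a waiting thread lend its priority to the owner.

    static struct mutex my_mutex;                /* hypothetical */
    static int shared_counter;

    static void my_module_init(void)
    {
        mutex_init(&my_mutex);                   /* once, before other threads can see it */
    }

    static void my_module_bump(void)
    {
        mutex_lock(&my_mutex);                   /* blocks, or recurses if we already own it */
        shared_counter++;
        mutex_unlock(&my_mutex);                 /* only the owning thread may unlock */
    }
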
@@ -1083,28 +1148,32 @@ void spinlock_init(struct spinlock *l)
1083 1148
1084void spinlock_lock(struct spinlock *l) 1149void spinlock_lock(struct spinlock *l)
1085{ 1150{
1086 struct thread_entry *const thread = cores[CURRENT_CORE].running; 1151 const unsigned int core = CURRENT_CORE;
1152 struct thread_entry *current = cores[core].running;
1087 1153
1088 if (l->thread == thread) 1154 if(l->thread == current)
1089 { 1155 {
1156 /* current core already owns it */
1090 l->count++; 1157 l->count++;
1091 return; 1158 return;
1092 } 1159 }
1093 1160
1161 /* lock against other processor cores */
1094 corelock_lock(&l->cl); 1162 corelock_lock(&l->cl);
1095 1163
1096 l->thread = thread; 1164 /* take ownership */
1165 l->thread = current;
1097} 1166}
1098 1167
1099void spinlock_unlock(struct spinlock *l) 1168void spinlock_unlock(struct spinlock *l)
1100{ 1169{
1101 /* unlocker not being the owner is an unlocking violation */ 1170 /* unlocker not being the owner is an unlocking violation */
1102 KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running, 1171 KERNEL_ASSERT(l->thread == thread_get_current(),
1103 "spinlock_unlock->wrong thread"); 1172 "spinlock_unlock->wrong thread\n");
1104 1173
1105 if (l->count > 0) 1174 if(l->count > 0)
1106 { 1175 {
1107 /* this thread still owns lock */ 1176 /* this core still owns lock */
1108 l->count--; 1177 l->count--;
1109 return; 1178 return;
1110 } 1179 }
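
Spinlocks follow the same owner/count pattern but hold the corelock for the whole critical section instead of blocking, so they are only suitable for very short sections. A minimal hedged sketch (names invented; spinlock_init(&my_sl) must run once before use):

    static struct spinlock my_sl;                /* hypothetical */
    static int cross_core_flag;

    static void my_set_flag(int v)
    {
        spinlock_lock(&my_sl);                   /* spins against the other core; nests for the owner */
        cross_core_flag = v;                     /* keep the critical section very short */
        spinlock_unlock(&my_sl);
    }
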
@@ -1124,76 +1193,62 @@ void spinlock_unlock(struct spinlock *l)
1124void semaphore_init(struct semaphore *s, int max, int start) 1193void semaphore_init(struct semaphore *s, int max, int start)
1125{ 1194{
1126 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, 1195 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
1127 "semaphore_init->inv arg"); 1196 "semaphore_init->inv arg\n");
1128 s->queue = NULL; 1197 s->queue = NULL;
1129 s->max = max; 1198 s->max = max;
1130 s->count = start; 1199 s->count = start;
1131#if CONFIG_CORELOCK == SW_CORELOCK
1132 corelock_init(&s->cl); 1200 corelock_init(&s->cl);
1133#endif
1134} 1201}
1135 1202
1136void semaphore_wait(struct semaphore *s) 1203void semaphore_wait(struct semaphore *s)
1137{ 1204{
1138#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1205 struct thread_entry *current;
1206
1139 corelock_lock(&s->cl); 1207 corelock_lock(&s->cl);
1208
1140 if(--s->count >= 0) 1209 if(--s->count >= 0)
1141 { 1210 {
1211 /* wait satisfied */
1142 corelock_unlock(&s->cl); 1212 corelock_unlock(&s->cl);
1143 return; 1213 return;
1144 } 1214 }
1145#elif CONFIG_CORELOCK == CORELOCK_SWAP
1146 int count;
1147 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1148 if(--count >= 0)
1149 {
1150 s->count = count;
1151 return;
1152 }
1153#endif
1154 1215
1155 /* too many waits - block until dequeued */ 1216 /* too many waits - block until dequeued... */
1156#if CONFIG_CORELOCK == SW_CORELOCK 1217 current = cores[CURRENT_CORE].running;
1157 const unsigned int core = CURRENT_CORE; 1218
1158 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1219 IF_COP( current->obj_cl = &s->cl; )
1159 cores[core].blk_ops.cl_p = &s->cl; 1220 current->bqp = &s->queue;
1160#elif CONFIG_CORELOCK == CORELOCK_SWAP 1221
1161 const unsigned int core = CURRENT_CORE; 1222 set_irq_level(HIGHEST_IRQ_LEVEL);
1162 cores[core].blk_ops.flags = TBOP_SET_VARi; 1223 block_thread(current);
1163 cores[core].blk_ops.var_ip = &s->count; 1224
1164 cores[core].blk_ops.var_iv = count; 1225 corelock_unlock(&s->cl);
1165#endif 1226
1166 block_thread_no_listlock(&s->queue); 1227 /* ...and turn control over to next thread */
1228 switch_thread();
1167} 1229}
1168 1230
1169void semaphore_release(struct semaphore *s) 1231void semaphore_release(struct semaphore *s)
1170{ 1232{
1171#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1233 IF_PRIO( unsigned int result = THREAD_NONE; )
1234
1172 corelock_lock(&s->cl); 1235 corelock_lock(&s->cl);
1173 if (s->count < s->max)
1174 {
1175 if (++s->count <= 0)
1176 {
1177#elif CONFIG_CORELOCK == CORELOCK_SWAP
1178 int count;
1179 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1180 if(count < s->max)
1181 {
1182 if(++count <= 0)
1183 {
1184#endif /* CONFIG_CORELOCK */
1185 1236
1186 /* there should be threads in this queue */ 1237 if(s->count < s->max && ++s->count <= 0)
1187 KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup"); 1238 {
1188 /* a thread was queued - wake it up */ 1239 /* there should be threads in this queue */
1189 wakeup_thread_no_listlock(&s->queue); 1240 KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
1190 } 1241 /* a thread was queued - wake it up */
1242 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1243 IF_PRIO( result = ) wakeup_thread(&s->queue);
1244 set_irq_level(oldlevel);
1191 } 1245 }
1192 1246
1193#if CONFIG_CORELOCK == SW_CORELOCK
1194 corelock_unlock(&s->cl); 1247 corelock_unlock(&s->cl);
1195#elif CONFIG_CORELOCK == CORELOCK_SWAP 1248
1196 s->count = count; 1249#ifdef HAVE_PRIORITY_SCHEDULING
1250 if(result & THREAD_SWITCH)
1251 switch_thread();
1197#endif 1252#endif
1198} 1253}
1199#endif /* HAVE_SEMAPHORE_OBJECTS */ 1254#endif /* HAVE_SEMAPHORE_OBJECTS */
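
A hedged producer/consumer sketch for the semaphore API above (the names and the depth of 8 are illustrative): semaphore_init() sets the maximum and starting counts, semaphore_wait() blocks once the count drops below zero, and semaphore_release() wakes one blocked waiter.

    static struct semaphore data_ready;          /* hypothetical */

    static void my_sem_init(void)
    {
        semaphore_init(&data_ready, 8, 0);       /* up to 8 pending signals, none yet */
    }

    static void my_producer(void)
    {
        semaphore_release(&data_ready);          /* wakes one waiter if any thread is blocked */
    }

    static void my_consumer_thread(void)
    {
        for (;;)
        {
            semaphore_wait(&data_ready);         /* blocks until a release arrives */
            process_next_item();                 /* hypothetical */
        }
    }
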
@@ -1208,117 +1263,107 @@ void event_init(struct event *e, unsigned int flags)
1208 e->queues[STATE_SIGNALED] = NULL; 1263 e->queues[STATE_SIGNALED] = NULL;
1209 e->state = flags & STATE_SIGNALED; 1264 e->state = flags & STATE_SIGNALED;
1210 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0; 1265 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
1211#if CONFIG_CORELOCK == SW_CORELOCK
1212 corelock_init(&e->cl); 1266 corelock_init(&e->cl);
1213#endif
1214} 1267}
1215 1268
1216void event_wait(struct event *e, unsigned int for_state) 1269void event_wait(struct event *e, unsigned int for_state)
1217{ 1270{
1218 unsigned int last_state; 1271 struct thread_entry *current;
1219#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1272
1220 corelock_lock(&e->cl); 1273 corelock_lock(&e->cl);
1221 last_state = e->state;
1222#elif CONFIG_CORELOCK == CORELOCK_SWAP
1223 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1224#endif
1225 1274
1226 if(e->automatic != 0) 1275 if(e->automatic != 0)
1227 { 1276 {
1228 /* wait for false always satisfied by definition 1277 /* wait for false always satisfied by definition
1229 or if it just changed to false */ 1278 or if it just changed to false */
1230 if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED) 1279 if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
1231 { 1280 {
1232 /* automatic - unsignal */ 1281 /* automatic - unsignal */
1233 e->state = STATE_NONSIGNALED; 1282 e->state = STATE_NONSIGNALED;
1234#if CONFIG_CORELOCK == SW_CORELOCK
1235 corelock_unlock(&e->cl); 1283 corelock_unlock(&e->cl);
1236#endif
1237 return; 1284 return;
1238 } 1285 }
1239 /* block until state matches */ 1286 /* block until state matches */
1240 } 1287 }
1241 else if(for_state == last_state) 1288 else if(for_state == e->state)
1242 { 1289 {
1243 /* the state being waited for is the current state */ 1290 /* the state being waited for is the current state */
1244#if CONFIG_CORELOCK == SW_CORELOCK
1245 corelock_unlock(&e->cl); 1291 corelock_unlock(&e->cl);
1246#elif CONFIG_CORELOCK == CORELOCK_SWAP
1247 e->state = last_state;
1248#endif
1249 return; 1292 return;
1250 } 1293 }
1251 1294
1252 { 1295 /* block until state matches what the caller requests */
1253 /* current state does not match wait-for state */ 1296 current = cores[CURRENT_CORE].running;
1254#if CONFIG_CORELOCK == SW_CORELOCK 1297
1255 const unsigned int core = CURRENT_CORE; 1298 IF_COP( current->obj_cl = &e->cl; )
1256 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1299 current->bqp = &e->queues[for_state];
1257 cores[core].blk_ops.cl_p = &e->cl; 1300
1258#elif CONFIG_CORELOCK == CORELOCK_SWAP 1301 set_irq_level(HIGHEST_IRQ_LEVEL);
1259 const unsigned int core = CURRENT_CORE; 1302 block_thread(current);
1260 cores[core].blk_ops.flags = TBOP_SET_VARu8; 1303
1261 cores[core].blk_ops.var_u8p = &e->state; 1304 corelock_unlock(&e->cl);
1262 cores[core].blk_ops.var_u8v = last_state; 1305
1263#endif 1306 /* turn control over to next thread */
1264 block_thread_no_listlock(&e->queues[for_state]); 1307 switch_thread();
1265 }
1266} 1308}
1267 1309
1268void event_set_state(struct event *e, unsigned int state) 1310void event_set_state(struct event *e, unsigned int state)
1269{ 1311{
1270 unsigned int last_state; 1312 unsigned int result;
1271#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1313 int oldlevel;
1314
1272 corelock_lock(&e->cl); 1315 corelock_lock(&e->cl);
1273 last_state = e->state;
1274#elif CONFIG_CORELOCK == CORELOCK_SWAP
1275 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1276#endif
1277 1316
1278 if(last_state == state) 1317 if(e->state == state)
1279 { 1318 {
1280 /* no change */ 1319 /* no change */
1281#if CONFIG_CORELOCK == SW_CORELOCK
1282 corelock_unlock(&e->cl); 1320 corelock_unlock(&e->cl);
1283#elif CONFIG_CORELOCK == CORELOCK_SWAP
1284 e->state = last_state;
1285#endif
1286 return; 1321 return;
1287 } 1322 }
1288 1323
1324 IF_PRIO( result = THREAD_OK; )
1325
1326 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1327
1289 if(state == STATE_SIGNALED) 1328 if(state == STATE_SIGNALED)
1290 { 1329 {
1291 if(e->automatic != 0) 1330 if(e->automatic != 0)
1292 { 1331 {
1293 struct thread_entry *thread; 1332 /* no thread should have ever blocked for nonsignaled */
1294 /* no thread should have ever blocked for unsignaled */
1295 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL, 1333 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
1296 "set_event_state->queue[NS]:S"); 1334 "set_event_state->queue[NS]:S\n");
1297 /* pass to next thread and keep unsignaled - "pulse" */ 1335 /* pass to next thread and keep unsignaled - "pulse" */
1298 thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]); 1336 result = wakeup_thread(&e->queues[STATE_SIGNALED]);
1299 e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED; 1337 e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
1300 } 1338 }
1301 else 1339 else
1302 { 1340 {
1303 /* release all threads waiting for signaled */ 1341 /* release all threads waiting for signaled */
1304 thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
1305 e->state = STATE_SIGNALED; 1342 e->state = STATE_SIGNALED;
1343 IF_PRIO( result = )
1344 thread_queue_wake(&e->queues[STATE_SIGNALED]);
1306 } 1345 }
1307 } 1346 }
1308 else 1347 else
1309 { 1348 {
1310 /* release all threads waiting for unsignaled */ 1349 /* release all threads waiting for nonsignaled */
1311 1350
1312 /* no thread should have ever blocked if automatic */ 1351 /* no thread should have ever blocked if automatic */
1313 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL || 1352 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
1314 e->automatic == 0, "set_event_state->queue[NS]:NS"); 1353 e->automatic == 0, "set_event_state->queue[NS]:NS\n");
1315 1354
1316 thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
1317 e->state = STATE_NONSIGNALED; 1355 e->state = STATE_NONSIGNALED;
1356 IF_PRIO( result = )
1357 thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
1318 } 1358 }
1319 1359
1320#if CONFIG_CORELOCK == SW_CORELOCK 1360 set_irq_level(oldlevel);
1361
1321 corelock_unlock(&e->cl); 1362 corelock_unlock(&e->cl);
1363
1364#ifdef HAVE_PRIORITY_SCHEDULING
1365 if(result & THREAD_SWITCH)
1366 switch_thread();
1322#endif 1367#endif
1323} 1368}
1324#endif /* HAVE_EVENT_OBJECTS */ 1369#endif /* HAVE_EVENT_OBJECTS */
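
Finally, a hedged sketch of the event object semantics implemented above (names invented): an event created with flags 0 starts nonsignaled and releases every waiter when it becomes signaled, while EVENT_AUTOMATIC gives pulse behaviour in which each wakeup consumes the signal again.

    static struct event buffer_done;             /* hypothetical */

    static void my_event_setup(void)
    {
        event_init(&buffer_done, 0);             /* manual-reset, initially nonsignaled */
    }

    static void my_waiter(void)
    {
        event_wait(&buffer_done, STATE_SIGNALED);       /* returns at once if already signaled */
    }

    static void my_signaller(void)
    {
        event_set_state(&buffer_done, STATE_SIGNALED);  /* releases all STATE_SIGNALED waiters */
    }

    /* an automatic ("pulse") event would instead be created with:
       event_init(&buffer_done, EVENT_AUTOMATIC); */
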