author    Miika Pekkarinen <miipekk@ihme.org>    2006-09-16 16:18:11 +0000
committer Miika Pekkarinen <miipekk@ihme.org>    2006-09-16 16:18:11 +0000
commit    a85044bf9eaa0a7206c1978d3cfd57ab2d7fae2f (patch)
tree      a30695ed540bf32365d577f46398f712c7a494c4 /firmware/kernel.c
parent    baf5494341cdd6cdb9590e21d429920b9bc4a2c6 (diff)
New scheduler, with priorities for swcodec platforms. Frequent task
switching should now be more efficient, and threads are kept in linked lists to eliminate unnecessary task switches and improve performance. Audio on swcodec targets should no longer skip because of an overly CPU-hungry UI thread or background threads.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@10958 a1c6a512-1295-4272-9138-f99709370657
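As an illustration of how client code uses the reworked primitives, the sketch below shows a waiter thread built on the new blocking queue API from this commit: queue_init() with the register_queue flag, queue_wait() blocking via block_thread(), and queue_post() waking the waiter via wakeup_thread(). This is a minimal sketch assuming the kernel.h declarations visible in the diff; the queue and function names are hypothetical and thread creation is omitted, so it is not code from the commit itself.

/* Hypothetical consumer built on the new blocking queue API. */
#include "kernel.h"

static struct event_queue example_queue;   /* hypothetical queue */

void example_queue_init(void)
{
    /* true: also register the queue in the global all_queues[] array
       (see queue_init() in the diff below) */
    queue_init(&example_queue, true);
}

void example_consumer_loop(void)
{
    struct event ev;

    for (;;)
    {
        /* With the new scheduler this blocks on example_queue.thread via
           block_thread() instead of spinning on sleep_thread(), so the CPU
           stays free for codec and other threads until queue_post() calls
           wakeup_thread() on this queue. */
        queue_wait(&example_queue, &ev);

        /* dispatch on ev.id / ev.data here */
    }
}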
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--   firmware/kernel.c   65
1 file changed, 31 insertions(+), 34 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 889f950252..4a6d61515a 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -35,7 +35,6 @@ static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
 static struct event_queue *all_queues[32];
 static int num_queues;

-void sleep(int ticks) ICODE_ATTR;
 void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;

 /****************************************************************************
@@ -71,13 +70,7 @@ void sleep(int ticks)
     } while(counter > 0);

 #else
-    /* Always sleep at least 1 tick */
-    int timeout = current_tick + ticks + 1;
-
-    while (TIME_BEFORE( current_tick, timeout )) {
-        sleep_thread();
-    }
-    wake_up_thread();
+    sleep_thread(ticks);
 #endif
 }

@@ -86,21 +79,24 @@ void yield(void)
 #if (CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) && defined(BOOTLOADER))
     /* Some targets don't like yielding in the bootloader */
 #else
-    switch_thread();
-    wake_up_thread();
+    switch_thread(true, NULL);
 #endif
 }

 /****************************************************************************
  * Queue handling stuff
  ****************************************************************************/
-void queue_init(struct event_queue *q)
+void queue_init(struct event_queue *q, bool register_queue)
 {
     q->read = 0;
     q->write = 0;
-
-    /* Add it to the all_queues array */
-    all_queues[num_queues++] = q;
+    q->thread = NULL;
+
+    if (register_queue)
+    {
+        /* Add it to the all_queues array */
+        all_queues[num_queues++] = q;
+    }
 }

 void queue_delete(struct event_queue *q)
@@ -108,6 +104,8 @@ void queue_delete(struct event_queue *q)
     int i;
     bool found = false;

+    wakeup_thread(&q->thread);
+
     /* Find the queue to be deleted */
     for(i = 0;i < num_queues;i++)
     {
@@ -132,26 +130,22 @@ void queue_delete(struct event_queue *q)

 void queue_wait(struct event_queue *q, struct event *ev)
 {
-    while(q->read == q->write)
+    if (q->read == q->write)
     {
-        sleep_thread();
+        block_thread(&q->thread, 0);
     }
-    wake_up_thread();

     *ev = q->events[(q->read++) & QUEUE_LENGTH_MASK];
 }

 void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
 {
-    unsigned int timeout = current_tick + ticks;
-
-    while(q->read == q->write && TIME_BEFORE( current_tick, timeout ))
+    if (q->read == q->write && ticks > 0)
     {
-        sleep_thread();
+        block_thread(&q->thread, ticks);
     }
-    wake_up_thread();

-    if(q->read != q->write)
+    if (q->read != q->write)
     {
         *ev = q->events[(q->read++) & QUEUE_LENGTH_MASK];
     }
@@ -171,6 +165,9 @@ void queue_post(struct event_queue *q, long id, void *data)

     q->events[wr].id = id;
     q->events[wr].data = data;
+
+    wakeup_thread(&q->thread);
+
     set_irq_level(oldlevel);
 }

@@ -250,7 +247,6 @@ void IMIA0(void)
     }

     current_tick++;
-    wake_up_thread();

     TSR0 &= ~0x01;
 }
@@ -301,7 +297,6 @@ void TIMER0(void)
     }

     current_tick++;
-    wake_up_thread();

     TER0 = 0xff; /* Clear all events */
 }
@@ -330,7 +325,6 @@ void TIMER0(void)
     }

     current_tick++;
-    wake_up_thread();

     /* re-enable timer by clearing the counter */
     TACON |= 0x80;
@@ -382,7 +376,6 @@ void TIMER1(void)
     }

     current_tick++;
-    wake_up_thread();
 }
 #endif

@@ -415,7 +408,6 @@ void timer_handler(void)
     }

     current_tick++;
-    wake_up_thread();

     TIMERR0C = 1;
 }
@@ -513,22 +505,27 @@ int tick_remove_task(void (*f)(void))
 void mutex_init(struct mutex *m)
 {
     m->locked = false;
+    m->thread = NULL;
 }

 void mutex_lock(struct mutex *m)
 {
-    /* Wait until the lock is open... */
-    while(m->locked)
-        sleep_thread();
-    wake_up_thread();
-
+    if (m->locked)
+    {
+        /* Wait until the lock is open... */
+        block_thread(&m->thread, 0);
+    }
+
     /* ...and lock it */
     m->locked = true;
 }

 void mutex_unlock(struct mutex *m)
 {
-    m->locked = false;
+    if (m->thread == NULL)
+        m->locked = false;
+    else
+        wakeup_thread(&m->thread);
 }

 #endif
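A brief note on the mutex rework at the end of the diff: with the new thread field, mutex_unlock() either clears the lock when nobody is waiting, or hands it directly to a blocked thread by calling wakeup_thread() while leaving m->locked set. The sketch below is a minimal, hypothetical usage example assuming the kernel.h interface shown above; the names example_lock, example_init and example_update are not from the commit.

/* Hypothetical critical section using the reworked mutex. */
#include "kernel.h"

static struct mutex example_lock;

void example_init(void)
{
    mutex_init(&example_lock);     /* locked = false, thread = NULL */
}

void example_update(void)
{
    /* Blocks on example_lock.thread if the lock is held, instead of
       polling with sleep_thread() as the old code did. */
    mutex_lock(&example_lock);

    /* ... modify shared state ... */

    /* Wakes one blocked waiter (ownership passes to it), or simply
       clears 'locked' when nobody is waiting. */
    mutex_unlock(&example_lock);
}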