author     Michael Sevakis <jethead71@rockbox.org>  2008-03-26 01:50:41 +0000
committer  Michael Sevakis <jethead71@rockbox.org>  2008-03-26 01:50:41 +0000
commit     af395f4db6ad7b83f9d9afefb1c0ceeedd140a45 (patch)
tree       b631289b4a3b28d3c65b10d272d50298f377c69f /firmware/kernel.c
parent     74d678fdbcbc427c057e7682ba0a0566e49a8b97 (diff)
Do core interrupt masking in a less general fashion and save some instructions, to decrease size and speed things up a little bit. Also a small fix to a few places where interrupts would get enabled again when they shouldn't have been (context-switching calls made while interrupts were disabled).
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16811 a1c6a512-1295-4272-9138-f99709370657
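The pattern behind the change: every call site in this file either saves the old interrupt-mask state and restores it afterwards, or simply disables interrupts before blocking and lets the context switch bring them back. Below is a minimal compilable sketch of those contracts, using a faked status word in place of the real per-architecture register accessors in system.h. Only the names set_irq_level, disable_irq_save, restore_irq, and disable_irq come from this commit; the model itself is illustrative, not the actual implementation.

#include <stdio.h>

/* Hypothetical stand-in for the CPU status register; in Rockbox the
 * real primitives are per-architecture inlines that touch the actual
 * interrupt mask. */
static int cpu_status = 0;
#define IRQ_MASK_BIT 0x80   /* bit set = IRQs masked */

/* Old, general primitive: set an arbitrary IRQ level, return the old. */
static int set_irq_level(int level)
{
    int old = cpu_status;
    cpu_status = level;
    return old;
}

/* New, specialized pair: these call sites only ever ask for "all off",
 * so a dedicated disable/save + restore can be a few instructions
 * shorter than the general level-setting routine. */
static int disable_irq_save(void)
{
    int old = cpu_status;
    cpu_status |= IRQ_MASK_BIT;
    return old;
}

static void restore_irq(int old)
{
    cpu_status = old;
}

/* Fire-and-forget form used right before blocking: the context switch,
 * not this code path, is responsible for the eventual re-enable. */
static void disable_irq(void)
{
    cpu_status |= IRQ_MASK_BIT;
}

int main(void)
{
    /* General form this commit moves away from: any level in, old out. */
    int prev = set_irq_level(IRQ_MASK_BIT);
    set_irq_level(prev);

    /* Save/restore critical section, as in tick_add_task() et al. */
    int oldlevel = disable_irq_save();
    /* ... touch shared kernel state here ... */
    restore_irq(oldlevel);

    /* Pre-blocking form, as in mutex_lock()/semaphore_wait(). */
    disable_irq();
    printf("status after disable_irq: %#x\n", cpu_status);
    return 0;
}

Because the specialized primitives only ever flip the mask one way, an implementation can drop the general level computation, which is plausibly where the size and speed savings in the commit message come from; using disable_irq() with no restore before block_thread() also avoids re-enabling interrupts in paths where the switch must happen with them off.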
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c | 96
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 47c0d58a95..be0cfb56a4 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -270,7 +270,7 @@ void tick_start(unsigned int interval_in_ms)
 int tick_add_task(void (*f)(void))
 {
     int i;
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
 
     /* Add a task if there is room */
     for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
@@ -278,11 +278,11 @@ int tick_add_task(void (*f)(void))
         if(tick_funcs[i] == NULL)
         {
             tick_funcs[i] = f;
-            set_irq_level(oldlevel);
+            restore_irq(oldlevel);
             return 0;
         }
     }
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
     panicf("Error! tick_add_task(): out of tasks");
     return -1;
 }
@@ -290,7 +290,7 @@ int tick_add_task(void (*f)(void))
 int tick_remove_task(void (*f)(void))
 {
     int i;
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
 
     /* Remove a task if it is there */
     for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
@@ -298,12 +298,12 @@ int tick_remove_task(void (*f)(void))
         if(tick_funcs[i] == f)
         {
             tick_funcs[i] = NULL;
-            set_irq_level(oldlevel);
+            restore_irq(oldlevel);
             return 0;
         }
     }
 
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
     return -1;
 }
 
@@ -341,7 +341,7 @@ static void timeout_tick(void)
 /* Cancels a timeout callback - can be called from the ISR */
 void timeout_cancel(struct timeout *tmo)
 {
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
 
     if (tmo_list != NULL)
     {
@@ -368,7 +368,7 @@ void timeout_cancel(struct timeout *tmo)
         /* not in list or tmo == NULL */
     }
 
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 /* Adds a timeout callback - calling with an active timeout resets the
@@ -382,7 +382,7 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
     if (tmo == NULL)
         return;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
 
     /* see if this one is already registered */
     curr = tmo_list;
@@ -404,7 +404,7 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
     tmo->data = data;
     *(long *)&tmo->expires = current_tick + ticks;
 
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 #endif /* INCLUDE_TIMEOUT_API */
@@ -433,7 +433,7 @@ void sleep(int ticks)
     while (TIME_BEFORE(USEC_TIMER, stop))
         switch_thread();
 #else
-    set_irq_level(HIGHEST_IRQ_LEVEL);
+    disable_irq();
     sleep_thread(ticks);
     switch_thread();
 #endif
@@ -537,7 +537,7 @@ void queue_enable_queue_send(struct event_queue *q,
                              struct queue_sender_list *send,
                              struct thread_entry *owner)
 {
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     if(send != NULL && q->send == NULL)
@@ -554,7 +554,7 @@ void queue_enable_queue_send(struct event_queue *q,
     }
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 
     (void)owner;
 }
@@ -618,7 +618,7 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
 /* Queue must not be available for use during this call */
 void queue_init(struct event_queue *q, bool register_queue)
 {
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
 
     if(register_queue)
     {
@@ -645,7 +645,7 @@ void queue_init(struct event_queue *q, bool register_queue)
         corelock_unlock(&all_queues.cl);
     }
 
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 /* Queue must not be available for use during this call */
@@ -654,7 +654,7 @@ void queue_delete(struct event_queue *q)
     int oldlevel;
     int i;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&all_queues.cl);
     corelock_lock(&q->cl);
 
@@ -697,7 +697,7 @@ void queue_delete(struct event_queue *q)
     q->write = 0;
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 /* NOTE: multiple threads waiting on a queue head cannot have a well-
@@ -714,7 +714,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
                   "queue_wait->wrong thread\n");
 #endif
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     /* auto-reply */
@@ -734,7 +734,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
         corelock_unlock(&q->cl);
         switch_thread();
 
-        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+        oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
     }
     /* A message that woke us could now be gone */
@@ -748,7 +748,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
         queue_do_fetch_sender(q->send, rd);
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
@@ -761,7 +761,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
                   "queue_wait_w_tmo->wrong thread\n");
 #endif
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     /* Auto-reply */
@@ -779,7 +779,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 
         switch_thread();
 
-        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+        oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
     }
 
@@ -798,7 +798,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     }
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 void queue_post(struct event_queue *q, long id, intptr_t data)
@@ -806,7 +806,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     int oldlevel;
     unsigned int wr;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     wr = q->write++ & QUEUE_LENGTH_MASK;
@@ -821,7 +821,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     wakeup_thread(&q->queue);
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -832,7 +832,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     int oldlevel;
     unsigned int wr;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     wr = q->write++ & QUEUE_LENGTH_MASK;
@@ -875,7 +875,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     wakeup_thread(&q->queue);
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 
     return 0;
 }
@@ -887,7 +887,7 @@ bool queue_in_queue_send(struct event_queue *q)
     bool in_send;
 
 #if NUM_CORES > 1
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 #endif
 
@@ -895,7 +895,7 @@ bool queue_in_queue_send(struct event_queue *q)
 
 #if NUM_CORES > 1
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 #endif
 
     return in_send;
@@ -907,7 +907,7 @@ void queue_reply(struct event_queue *q, intptr_t retval)
 {
     if(q->send && q->send->curr_sender)
     {
-        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+        int oldlevel = disable_irq_save();
         corelock_lock(&q->cl);
         /* Double-check locking */
         IF_COP( if(q->send && q->send->curr_sender) )
@@ -916,7 +916,7 @@ void queue_reply(struct event_queue *q, intptr_t retval)
         }
 
         corelock_unlock(&q->cl);
-        set_irq_level(oldlevel);
+        restore_irq(oldlevel);
     }
 }
 
@@ -927,7 +927,7 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
 
     bool have_msg = false;
 
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     if(q->read != q->write)
@@ -937,7 +937,7 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
     }
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 
     return have_msg;
 }
@@ -956,7 +956,7 @@ void queue_clear(struct event_queue* q)
 {
     int oldlevel;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     /* Release all threads waiting in the queue for a reply -
@@ -967,14 +967,14 @@ void queue_clear(struct event_queue* q)
     q->write = 0;
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 void queue_remove_from_head(struct event_queue *q, long id)
 {
     int oldlevel;
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
     corelock_lock(&q->cl);
 
     while(q->read != q->write)
@@ -993,7 +993,7 @@ void queue_remove_from_head(struct event_queue *q, long id)
     }
 
     corelock_unlock(&q->cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 }
 
 /**
@@ -1012,7 +1012,7 @@ int queue_broadcast(long id, intptr_t data)
     int i;
 
 #if NUM_CORES > 1
-    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    int oldlevel = disable_irq_save();
     corelock_lock(&all_queues.cl);
 #endif
 
@@ -1023,7 +1023,7 @@ int queue_broadcast(long id, intptr_t data)
 
 #if NUM_CORES > 1
     corelock_unlock(&all_queues.cl);
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 #endif
 
     return i;
@@ -1079,7 +1079,7 @@ void mutex_lock(struct mutex *m)
     IF_PRIO( current->blocker = &m->blocker; )
     current->bqp = &m->queue;
 
-    set_irq_level(HIGHEST_IRQ_LEVEL);
+    disable_irq();
     block_thread(current);
 
     corelock_unlock(&m->cl);
@@ -1118,13 +1118,13 @@ void mutex_unlock(struct mutex *m)
     }
     else
     {
-        const int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+        const int oldlevel = disable_irq_save();
         /* Tranfer of owning thread is handled in the wakeup protocol
          * if priorities are enabled otherwise just set it from the
         * queue head. */
        IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
-        set_irq_level(oldlevel);
+        restore_irq(oldlevel);
 
        corelock_unlock(&m->cl);
 
@@ -1219,7 +1219,7 @@ void semaphore_wait(struct semaphore *s)
     IF_COP( current->obj_cl = &s->cl; )
     current->bqp = &s->queue;
 
-    set_irq_level(HIGHEST_IRQ_LEVEL);
+    disable_irq();
     block_thread(current);
 
     corelock_unlock(&s->cl);
@@ -1239,9 +1239,9 @@ void semaphore_release(struct semaphore *s)
         /* there should be threads in this queue */
         KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
         /* a thread was queued - wake it up */
-        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+        int oldlevel = disable_irq_save();
         IF_PRIO( result = ) wakeup_thread(&s->queue);
-        set_irq_level(oldlevel);
+        restore_irq(oldlevel);
     }
 
     corelock_unlock(&s->cl);
@@ -1298,7 +1298,7 @@ void event_wait(struct event *e, unsigned int for_state)
     IF_COP( current->obj_cl = &e->cl; )
     current->bqp = &e->queues[for_state];
 
-    set_irq_level(HIGHEST_IRQ_LEVEL);
+    disable_irq();
     block_thread(current);
 
     corelock_unlock(&e->cl);
@@ -1323,7 +1323,7 @@ void event_set_state(struct event *e, unsigned int state)
 
     IF_PRIO( result = THREAD_OK; )
 
-    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+    oldlevel = disable_irq_save();
 
     if(state == STATE_SIGNALED)
     {
@@ -1357,7 +1357,7 @@ void event_set_state(struct event *e, unsigned int state)
         thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
     }
 
-    set_irq_level(oldlevel);
+    restore_irq(oldlevel);
 
     corelock_unlock(&e->cl);
 