Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--    firmware/kernel.c    166
1 file changed, 67 insertions(+), 99 deletions(-)
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 588bbd2a75..a8718ebf34 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -978,6 +978,9 @@ void mutex_unlock(struct mutex *m)
  * Simple semaphore functions ;)
  ****************************************************************************/
 #ifdef HAVE_SEMAPHORE_OBJECTS
+/* Initialize the semaphore object.
+ * max = maximum up count the semaphore may assume (max >= 1)
+ * start = initial count of semaphore (0 <= count <= max) */
 void semaphore_init(struct semaphore *s, int max, int start)
 {
     KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
@@ -988,132 +991,97 @@ void semaphore_init(struct semaphore *s, int max, int start)
     corelock_init(&s->cl);
 }
 
-void semaphore_wait(struct semaphore *s)
+/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
+ * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
+ * safely be used in an ISR. */
+int semaphore_wait(struct semaphore *s, int timeout)
 {
-    struct thread_entry *current;
+    int ret;
+    int oldlevel;
+    int count;
 
+    oldlevel = disable_irq_save();
     corelock_lock(&s->cl);
 
-    if(LIKELY(--s->count >= 0))
+    count = s->count;
+
+    if(LIKELY(count > 0))
     {
-        /* wait satisfied */
-        corelock_unlock(&s->cl);
-        return;
+        /* count is not zero; down it */
+        s->count = count - 1;
+        ret = OBJ_WAIT_SUCCEEDED;
+    }
+    else if(timeout == 0)
+    {
+        /* just polling it */
+        ret = OBJ_WAIT_TIMEDOUT;
     }
+    else
+    {
+        /* too many waits - block until count is upped... */
+        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);
+        IF_COP( current->obj_cl = &s->cl; )
+        current->bqp = &s->queue;
+        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
+         * explicit in semaphore_release */
+        current->retval = OBJ_WAIT_TIMEDOUT;
+
+        if(timeout > 0)
+            block_thread_w_tmo(current, timeout); /* ...or timed out... */
+        else
+            block_thread(current);    /* -timeout = infinite */
 
-    /* too many waits - block until dequeued... */
-    current = thread_id_entry(THREAD_ID_CURRENT);
+        corelock_unlock(&s->cl);
 
-    IF_COP( current->obj_cl = &s->cl; )
-    current->bqp = &s->queue;
+        /* ...and turn control over to next thread */
+        switch_thread();
 
-    disable_irq();
-    block_thread(current);
+        return current->retval;
+    }
 
     corelock_unlock(&s->cl);
+    restore_irq(oldlevel);
 
-    /* ...and turn control over to next thread */
-    switch_thread();
+    return ret;
 }
 
+/* Up the semaphore's count and release any thread waiting at the head of the
+ * queue. The count is saturated to the value of the 'max' parameter specified
+ * in 'semaphore_init'. */
 void semaphore_release(struct semaphore *s)
 {
     IF_PRIO( unsigned int result = THREAD_NONE; )
+    int oldlevel;
 
+    oldlevel = disable_irq_save();
     corelock_lock(&s->cl);
 
-    if(s->count < s->max && ++s->count <= 0)
+    if(LIKELY(s->queue != NULL))
     {
-        /* there should be threads in this queue */
-        KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
-        /* a thread was queued - wake it up */
-        int oldlevel = disable_irq_save();
+        /* a thread was queued - wake it up and keep count at 0 */
+        KERNEL_ASSERT(s->count == 0,
+            "semaphore_release->threads queued but count=%d!\n", s->count);
+        s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
         IF_PRIO( result = ) wakeup_thread(&s->queue);
-        restore_irq(oldlevel);
+    }
+    else
+    {
+        int count = s->count;
+        if(count < s->max)
+        {
+            /* nothing waiting - up it */
+            s->count = count + 1;
+        }
     }
 
     corelock_unlock(&s->cl);
+    restore_irq(oldlevel);
 
-#ifdef HAVE_PRIORITY_SCHEDULING
-    if(result & THREAD_SWITCH)
+#if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval)
+    /* No thread switch if IRQ disabled - it's probably called via ISR.
+     * switch_thread would as well enable them anyway. */
+    if((result & THREAD_SWITCH) && irq_enabled_checkval(oldlevel))
         switch_thread();
 #endif
 }
 #endif /* HAVE_SEMAPHORE_OBJECTS */
-
-#ifdef HAVE_WAKEUP_OBJECTS
-/****************************************************************************
- * Lightweight IRQ-compatible wakeup object
- */
-
-/* Initialize the wakeup object */
-void wakeup_init(struct wakeup *w)
-{
-    w->queue = NULL;
-    w->signalled = false;
-    IF_COP( corelock_init(&w->cl); )
-}
-
-/* Wait for a signal blocking indefinitely or for a specified period */
-int wakeup_wait(struct wakeup *w, int timeout)
-{
-    int ret = OBJ_WAIT_SUCCEEDED; /* Presume success */
-    int oldlevel = disable_irq_save();
-
-    corelock_lock(&w->cl);
-
-    if(LIKELY(!w->signalled && timeout != TIMEOUT_NOBLOCK))
-    {
-        struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);
-
-        IF_COP( current->obj_cl = &w->cl; )
-        current->bqp = &w->queue;
-
-        if (timeout != TIMEOUT_BLOCK)
-            block_thread_w_tmo(current, timeout);
-        else
-            block_thread(current);
-
-        corelock_unlock(&w->cl);
-        switch_thread();
-
-        oldlevel = disable_irq_save();
-        corelock_lock(&w->cl);
-    }
-
-    if(UNLIKELY(!w->signalled))
-    {
-        /* Timed-out or failed */
-        ret = (timeout != TIMEOUT_BLOCK) ?
-            OBJ_WAIT_TIMEDOUT : OBJ_WAIT_FAILED;
-    }
-
-    w->signalled = false; /* Reset */
-
-    corelock_unlock(&w->cl);
-    restore_irq(oldlevel);
-
-    return ret;
-}
-
-/* Signal the thread waiting or leave the signal if the thread hasn't
- * waited yet.
- *
- * returns THREAD_NONE or THREAD_OK
- */
-int wakeup_signal(struct wakeup *w)
-{
-    int oldlevel = disable_irq_save();
-    int ret;
-
-    corelock_lock(&w->cl);
-
-    w->signalled = true;
-    ret = wakeup_thread(&w->queue);
-
-    corelock_unlock(&w->cl);
-    restore_irq(oldlevel);
-
-    return ret;
-}
-#endif /* HAVE_WAKEUP_OBJECTS */
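
A minimal usage sketch of the reworked semaphore API, going only by the comments
in the diff above: a semaphore initialized with max = 1 and start = 0 covers the
role of the removed wakeup object, semaphore_wait() takes a tick timeout (0
polls without blocking, a negative value blocks indefinitely) and returns
OBJ_WAIT_SUCCEEDED or OBJ_WAIT_TIMEDOUT, and semaphore_release() wakes a queued
waiter or ups the saturated count. The data_ready object, the consumer/ISR
function names and the HZ timeout value are illustrative assumptions, not part
of this change.

    /* Hypothetical handlers, assumed to exist elsewhere. */
    extern void process_data(void);
    extern void handle_timeout(void);

    /* Binary semaphore: max = 1 so repeated releases saturate,
     * start = 0 so the first wait blocks until a release. */
    static struct semaphore data_ready;

    void consumer_init(void)
    {
        semaphore_init(&data_ready, 1, 0);
    }

    void consumer_thread(void)
    {
        while(1)
        {
            /* Block for up to HZ ticks waiting for the ISR to release us. */
            if(semaphore_wait(&data_ready, HZ) == OBJ_WAIT_SUCCEEDED)
                process_data();   /* explicit wake from semaphore_release */
            else
                handle_timeout(); /* OBJ_WAIT_TIMEDOUT: no release in time */
        }
    }

    void data_isr(void)
    {
        /* Wakes the queued waiter, or ups the count (saturated at max = 1). */
        semaphore_release(&data_ready);
    }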