summaryrefslogtreecommitdiff
path: root/firmware/kernel/mutex.c
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/kernel/mutex.c')
-rw-r--r--firmware/kernel/mutex.c28
1 file changed, 10 insertions, 18 deletions
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index e5729dc893..fc49cc6d09 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -30,20 +30,19 @@
30 * the object is available to other threads */ 30 * the object is available to other threads */
31void mutex_init(struct mutex *m) 31void mutex_init(struct mutex *m)
32{ 32{
33 corelock_init(&m->cl); 33 wait_queue_init(&m->queue);
34 m->queue = NULL;
35 m->recursion = 0; 34 m->recursion = 0;
36 m->blocker.thread = NULL; 35 blocker_init(&m->blocker);
37#ifdef HAVE_PRIORITY_SCHEDULING 36#ifdef HAVE_PRIORITY_SCHEDULING
38 m->blocker.priority = PRIORITY_IDLE;
39 m->no_preempt = false; 37 m->no_preempt = false;
40#endif 38#endif
39 corelock_init(&m->cl);
41} 40}
42 41
43/* Gain ownership of a mutex object or block until it becomes free */ 42/* Gain ownership of a mutex object or block until it becomes free */
44void mutex_lock(struct mutex *m) 43void mutex_lock(struct mutex *m)
45{ 44{
46 struct thread_entry *current = thread_self_entry(); 45 struct thread_entry *current = __running_self_entry();
47 46
48 if(current == m->blocker.thread) 47 if(current == m->blocker.thread)
49 { 48 {
@@ -65,12 +64,8 @@ void mutex_lock(struct mutex *m)
65 } 64 }
66 65
67 /* block until the lock is open... */ 66 /* block until the lock is open... */
68 IF_COP( current->obj_cl = &m->cl; )
69 IF_PRIO( current->blocker = &m->blocker; )
70 current->bqp = &m->queue;
71
72 disable_irq(); 67 disable_irq();
73 block_thread(current, TIMEOUT_BLOCK); 68 block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);
74 69
75 corelock_unlock(&m->cl); 70 corelock_unlock(&m->cl);
76 71
@@ -82,10 +77,10 @@ void mutex_lock(struct mutex *m)
82void mutex_unlock(struct mutex *m) 77void mutex_unlock(struct mutex *m)
83{ 78{
84 /* unlocker not being the owner is an unlocking violation */ 79 /* unlocker not being the owner is an unlocking violation */
85 KERNEL_ASSERT(m->blocker.thread == thread_self_entry(), 80 KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
86 "mutex_unlock->wrong thread (%s != %s)\n", 81 "mutex_unlock->wrong thread (%s != %s)\n",
87 m->blocker.thread->name, 82 m->blocker.thread->name,
88 thread_self_entry()->name); 83 __running_self_entry()->name);
89 84
90 if(m->recursion > 0) 85 if(m->recursion > 0)
91 { 86 {
@@ -98,7 +93,8 @@ void mutex_unlock(struct mutex *m)
98 corelock_lock(&m->cl); 93 corelock_lock(&m->cl);
99 94
100 /* transfer to next queued thread if any */ 95 /* transfer to next queued thread if any */
101 if(LIKELY(m->queue == NULL)) 96 struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
97 if(LIKELY(thread == NULL))
102 { 98 {
103 /* no threads waiting - open the lock */ 99 /* no threads waiting - open the lock */
104 m->blocker.thread = NULL; 100 m->blocker.thread = NULL;
@@ -107,11 +103,7 @@ void mutex_unlock(struct mutex *m)
107 } 103 }
108 104
109 const int oldlevel = disable_irq_save(); 105 const int oldlevel = disable_irq_save();
110 /* Transfer of owning thread is handled in the wakeup protocol 106 unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
111 * if priorities are enabled otherwise just set it from the
112 * queue head. */
113 IFN_PRIO( m->blocker.thread = m->queue; )
114 unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
115 restore_irq(oldlevel); 107 restore_irq(oldlevel);
116 108
117 corelock_unlock(&m->cl); 109 corelock_unlock(&m->cl);