summaryrefslogtreecommitdiff
path: root/firmware/kernel/mutex.c
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2014-04-24 04:09:18 -0400
committerMichael Sevakis <jethead71@rockbox.org>2014-08-06 02:47:47 +0200
commit533d396761b630e372166f6f0522ba1c2d128d70 (patch)
tree823a5f800049f62d4ea9f573b4cdeb3e7ff9b3e1 /firmware/kernel/mutex.c
parent6536f1db3eedf0a12d16c5504cba94725eb6500d (diff)
downloadrockbox-533d396761b630e372166f6f0522ba1c2d128d70.tar.gz
rockbox-533d396761b630e372166f6f0522ba1c2d128d70.zip
Add multi-reader, single-writer locks to kernel.
Any number of readers may be in the critical section at a time and writers are mutually exclusive to all other threads. They are a better choice when data is rarely modified but often read and multiple threads can safely access it for reading. Priority inheritance is fully implemented along with other changes to the kernel to fully support it on multiowner objects. This also cleans up priority code in the kernel and updates some associated structures in existing objects to the cleaner form. Currently doesn't add the mrsw_lock.[ch] files since they're not yet needed by anything but the supporting improvements are still useful. This includes a typed bitarray API (bitarray.h) which is pretty basic for now. Change-Id: Idbe43dcd9170358e06d48d00f1c69728ff45b0e3 Reviewed-on: http://gerrit.rockbox.org/801 Reviewed-by: Michael Sevakis <jethead71@rockbox.org> Tested: Michael Sevakis <jethead71@rockbox.org>
Diffstat (limited to 'firmware/kernel/mutex.c')
-rw-r--r--firmware/kernel/mutex.c65
1 file changed, 21 insertions, 44 deletions
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index f1e4b3c722..2e90b0f4b1 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -27,31 +27,10 @@
27#include <stdbool.h> 27#include <stdbool.h>
28#include "config.h" 28#include "config.h"
29#include "system.h" 29#include "system.h"
30#include "mutex.h" 30#include "kernel.h"
31#include "corelock.h"
32#include "thread-internal.h" 31#include "thread-internal.h"
33#include "kernel-internal.h" 32#include "kernel-internal.h"
34 33
35static inline void __attribute__((always_inline))
36mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
37{
38#ifdef HAVE_PRIORITY_SCHEDULING
39 mtx->blocker.thread = td;
40#else
41 mtx->thread = td;
42#endif
43}
44
45static inline struct thread_entry * __attribute__((always_inline))
46mutex_get_thread(volatile struct mutex *mtx)
47{
48#ifdef HAVE_PRIORITY_SCHEDULING
49 return mtx->blocker.thread;
50#else
51 return mtx->thread;
52#endif
53}
54
55/* Initialize a mutex object - call before any use and do not call again once 34/* Initialize a mutex object - call before any use and do not call again once
56 * the object is available to other threads */ 35 * the object is available to other threads */
57void mutex_init(struct mutex *m) 36void mutex_init(struct mutex *m)
@@ -59,10 +38,9 @@ void mutex_init(struct mutex *m)
59 corelock_init(&m->cl); 38 corelock_init(&m->cl);
60 m->queue = NULL; 39 m->queue = NULL;
61 m->recursion = 0; 40 m->recursion = 0;
62 mutex_set_thread(m, NULL); 41 m->blocker.thread = NULL;
63#ifdef HAVE_PRIORITY_SCHEDULING 42#ifdef HAVE_PRIORITY_SCHEDULING
64 m->blocker.priority = PRIORITY_IDLE; 43 m->blocker.priority = PRIORITY_IDLE;
65 m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
66 m->no_preempt = false; 44 m->no_preempt = false;
67#endif 45#endif
68} 46}
@@ -72,7 +50,7 @@ void mutex_lock(struct mutex *m)
72{ 50{
73 struct thread_entry *current = thread_self_entry(); 51 struct thread_entry *current = thread_self_entry();
74 52
75 if(current == mutex_get_thread(m)) 53 if(current == m->blocker.thread)
76 { 54 {
77 /* current thread already owns this mutex */ 55 /* current thread already owns this mutex */
78 m->recursion++; 56 m->recursion++;
@@ -83,10 +61,10 @@ void mutex_lock(struct mutex *m)
83 corelock_lock(&m->cl); 61 corelock_lock(&m->cl);
84 62
85 /* must read thread again inside cs (a multiprocessor concern really) */ 63 /* must read thread again inside cs (a multiprocessor concern really) */
86 if(LIKELY(mutex_get_thread(m) == NULL)) 64 if(LIKELY(m->blocker.thread == NULL))
87 { 65 {
88 /* lock is open */ 66 /* lock is open */
89 mutex_set_thread(m, current); 67 m->blocker.thread = current;
90 corelock_unlock(&m->cl); 68 corelock_unlock(&m->cl);
91 return; 69 return;
92 } 70 }
@@ -97,7 +75,7 @@ void mutex_lock(struct mutex *m)
97 current->bqp = &m->queue; 75 current->bqp = &m->queue;
98 76
99 disable_irq(); 77 disable_irq();
100 block_thread(current); 78 block_thread(current, TIMEOUT_BLOCK);
101 79
102 corelock_unlock(&m->cl); 80 corelock_unlock(&m->cl);
103 81
@@ -109,9 +87,9 @@ void mutex_lock(struct mutex *m)
109void mutex_unlock(struct mutex *m) 87void mutex_unlock(struct mutex *m)
110{ 88{
111 /* unlocker not being the owner is an unlocking violation */ 89 /* unlocker not being the owner is an unlocking violation */
112 KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(), 90 KERNEL_ASSERT(m->blocker.thread == thread_self_entry(),
113 "mutex_unlock->wrong thread (%s != %s)\n", 91 "mutex_unlock->wrong thread (%s != %s)\n",
114 mutex_get_thread(m)->name, 92 m->blocker.thread->name,
115 thread_self_entry()->name); 93 thread_self_entry()->name);
116 94
117 if(m->recursion > 0) 95 if(m->recursion > 0)
@@ -128,25 +106,24 @@ void mutex_unlock(struct mutex *m)
128 if(LIKELY(m->queue == NULL)) 106 if(LIKELY(m->queue == NULL))
129 { 107 {
130 /* no threads waiting - open the lock */ 108 /* no threads waiting - open the lock */
131 mutex_set_thread(m, NULL); 109 m->blocker.thread = NULL;
132 corelock_unlock(&m->cl); 110 corelock_unlock(&m->cl);
133 return; 111 return;
134 } 112 }
135 else
136 {
137 const int oldlevel = disable_irq_save();
138 /* Transfer of owning thread is handled in the wakeup protocol
139 * if priorities are enabled otherwise just set it from the
140 * queue head. */
141 IFN_PRIO( mutex_set_thread(m, m->queue); )
142 IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
143 restore_irq(oldlevel);
144 113
145 corelock_unlock(&m->cl); 114 const int oldlevel = disable_irq_save();
115 /* Transfer of owning thread is handled in the wakeup protocol
116 * if priorities are enabled otherwise just set it from the
117 * queue head. */
118 IFN_PRIO( m->blocker.thread = m->queue; )
119 unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
120 restore_irq(oldlevel);
121
122 corelock_unlock(&m->cl);
146 123
147#ifdef HAVE_PRIORITY_SCHEDULING 124#ifdef HAVE_PRIORITY_SCHEDULING
148 if((result & THREAD_SWITCH) && !m->no_preempt) 125 if((result & THREAD_SWITCH) && !m->no_preempt)
149 switch_thread(); 126 switch_thread();
150#endif 127#endif
151 } 128 (void)result;
152} 129}