summaryrefslogtreecommitdiff
path: root/firmware/kernel/mutex.c
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2014-08-08 06:33:51 -0400
committerMichael Sevakis <jethead71@rockbox.org>2014-08-16 05:15:37 -0400
commit6ed00870abd566d7267d2436c2693f5a281cda2f (patch)
tree6011c73e302254fc73f61a1b8b1f295ded1f5d56 /firmware/kernel/mutex.c
parenteb63d8b4a2a7cbe4e98216b48a75391718fcebd7 (diff)
downloadrockbox-6ed00870abd566d7267d2436c2693f5a281cda2f.tar.gz
rockbox-6ed00870abd566d7267d2436c2693f5a281cda2f.zip
Base scheduler queues off linked lists and do cleanup/consolidation
Abstracts threading from itself a bit, changes the way its queues are handled and does type hiding for that as well. Do a lot here due to already required major brain surgery. Threads may now be on a run queue and a wait queue simultaneously so that the expired timer only has to wake the thread but not remove it from the wait queue which simplifies the implicit wake handling. List formats change for wait queues-- doubly-linked, not circular. Timeout queue is now singly-linked. The run queue is still circular as before. Adds a better thread slot allocator that may keep the slot marked as used regardless of the thread state. Assists in dumping special tasks that switch_thread was tasked to perform (blocking tasks). Deletes a lot of code yet surprisingly, gets larger than expected. Well, I'm not minding that for the time being-- omelettes and breaking a few eggs and all that. Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
Diffstat (limited to 'firmware/kernel/mutex.c')
-rw-r--r--firmware/kernel/mutex.c28
1 files changed, 10 insertions, 18 deletions
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index e5729dc893..fc49cc6d09 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -30,20 +30,19 @@
30 * the object is available to other threads */ 30 * the object is available to other threads */
31void mutex_init(struct mutex *m) 31void mutex_init(struct mutex *m)
32{ 32{
33 corelock_init(&m->cl); 33 wait_queue_init(&m->queue);
34 m->queue = NULL;
35 m->recursion = 0; 34 m->recursion = 0;
36 m->blocker.thread = NULL; 35 blocker_init(&m->blocker);
37#ifdef HAVE_PRIORITY_SCHEDULING 36#ifdef HAVE_PRIORITY_SCHEDULING
38 m->blocker.priority = PRIORITY_IDLE;
39 m->no_preempt = false; 37 m->no_preempt = false;
40#endif 38#endif
39 corelock_init(&m->cl);
41} 40}
42 41
43/* Gain ownership of a mutex object or block until it becomes free */ 42/* Gain ownership of a mutex object or block until it becomes free */
44void mutex_lock(struct mutex *m) 43void mutex_lock(struct mutex *m)
45{ 44{
46 struct thread_entry *current = thread_self_entry(); 45 struct thread_entry *current = __running_self_entry();
47 46
48 if(current == m->blocker.thread) 47 if(current == m->blocker.thread)
49 { 48 {
@@ -65,12 +64,8 @@ void mutex_lock(struct mutex *m)
65 } 64 }
66 65
67 /* block until the lock is open... */ 66 /* block until the lock is open... */
68 IF_COP( current->obj_cl = &m->cl; )
69 IF_PRIO( current->blocker = &m->blocker; )
70 current->bqp = &m->queue;
71
72 disable_irq(); 67 disable_irq();
73 block_thread(current, TIMEOUT_BLOCK); 68 block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);
74 69
75 corelock_unlock(&m->cl); 70 corelock_unlock(&m->cl);
76 71
@@ -82,10 +77,10 @@ void mutex_lock(struct mutex *m)
82void mutex_unlock(struct mutex *m) 77void mutex_unlock(struct mutex *m)
83{ 78{
84 /* unlocker not being the owner is an unlocking violation */ 79 /* unlocker not being the owner is an unlocking violation */
85 KERNEL_ASSERT(m->blocker.thread == thread_self_entry(), 80 KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
86 "mutex_unlock->wrong thread (%s != %s)\n", 81 "mutex_unlock->wrong thread (%s != %s)\n",
87 m->blocker.thread->name, 82 m->blocker.thread->name,
88 thread_self_entry()->name); 83 __running_self_entry()->name);
89 84
90 if(m->recursion > 0) 85 if(m->recursion > 0)
91 { 86 {
@@ -98,7 +93,8 @@ void mutex_unlock(struct mutex *m)
98 corelock_lock(&m->cl); 93 corelock_lock(&m->cl);
99 94
100 /* transfer to next queued thread if any */ 95 /* transfer to next queued thread if any */
101 if(LIKELY(m->queue == NULL)) 96 struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
97 if(LIKELY(thread == NULL))
102 { 98 {
103 /* no threads waiting - open the lock */ 99 /* no threads waiting - open the lock */
104 m->blocker.thread = NULL; 100 m->blocker.thread = NULL;
@@ -107,11 +103,7 @@ void mutex_unlock(struct mutex *m)
107 } 103 }
108 104
109 const int oldlevel = disable_irq_save(); 105 const int oldlevel = disable_irq_save();
110 /* Tranfer of owning thread is handled in the wakeup protocol 106 unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
111 * if priorities are enabled otherwise just set it from the
112 * queue head. */
113 IFN_PRIO( m->blocker.thread = m->queue; )
114 unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
115 restore_irq(oldlevel); 107 restore_irq(oldlevel);
116 108
117 corelock_unlock(&m->cl); 109 corelock_unlock(&m->cl);