author    Michael Sevakis <jethead71@rockbox.org>    2010-12-26 05:59:39 +0000
committer Michael Sevakis <jethead71@rockbox.org>    2010-12-26 05:59:39 +0000
commit    f387cdef2131b2a0956ee8e4ff7221d3251b8f46 (patch)
tree      00b9226241ecb83e8c92798241bb14ebcf5b6fab
parent    1d460b603fded79cb879704a4d30a50318263211 (diff)
download  rockbox-f387cdef2131b2a0956ee8e4ff7221d3251b8f46.tar.gz
          rockbox-f387cdef2131b2a0956ee8e4ff7221d3251b8f46.zip
Make mutexes a tiny bit leaner. There is no need for a separate locked semaphore since having an owning thread also indicates that it is locked. Rename member 'count' to 'recursion' since it counts reentry, not locks. Change presents no compatibility issues for plugins or codecs because the structure size goes down.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28901 a1c6a512-1295-4272-9138-f99709370657
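To illustrate the idea in isolation: the sketch below is not part of the patch. It is a minimal, single-core rendering of the invariant the commit relies on - a non-NULL owner pointer *is* the locked state, so no separate 'locked' flag is needed, and 'recursion' only counts reentry by that owner. The names used here (simple_mutex, current_thread, NO_THREAD) are made up for the example; the real firmware code additionally deals with corelocks, wait queues and priority inheritance.

#include <stdbool.h>

/* Stand-in for "which thread is running"; the firmware gets this from the
 * scheduler (e.g. via thread_id_entry(THREAD_ID_CURRENT)), here it is faked. */
typedef int thread_id;
#define NO_THREAD 0
static thread_id current_thread(void) { return 1; }

struct simple_mutex
{
    thread_id owner;    /* NO_THREAD means "not locked" - no extra flag */
    int recursion;      /* extra acquisitions by the owning thread */
};

static void simple_mutex_lock(struct simple_mutex *m)
{
    thread_id self = current_thread();

    if (m->owner == self)
    {
        m->recursion++;           /* reentry by the owner: just count it */
        return;
    }

    while (m->owner != NO_THREAD)
        ;                         /* the real kernel blocks on m->queue here */

    m->owner = self;              /* taking ownership is what "locks" it */
}

static void simple_mutex_unlock(struct simple_mutex *m)
{
    if (m->recursion > 0)
    {
        m->recursion--;           /* owner still holds at least one lock */
        return;
    }

    m->owner = NO_THREAD;         /* dropping ownership is what "unlocks" it */
}

static bool simple_mutex_test(const struct simple_mutex *m)
{
    return m->owner != NO_THREAD; /* the same check the new mutex_test() makes */
}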
-rw-r--r--   firmware/drivers/ata_mmc.c |  2
-rw-r--r--   firmware/export/kernel.h   | 19
-rw-r--r--   firmware/kernel.c          | 24
3 files changed, 25 insertions, 20 deletions
diff --git a/firmware/drivers/ata_mmc.c b/firmware/drivers/ata_mmc.c
index c27c3b5d05..dfc63021c9 100644
--- a/firmware/drivers/ata_mmc.c
+++ b/firmware/drivers/ata_mmc.c
@@ -760,7 +760,7 @@ int mmc_write_sectors(IF_MD2(int drive,)
 bool mmc_disk_is_active(void)
 {
     /* this is correct unless early return from write gets implemented */
-    return mmc_mutex.locked;
+    return mutex_test(&mmc_mutex);
 }
 
 static void mmc_thread(void)
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 405f6b6838..c7fcd93284 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -142,17 +142,19 @@ struct event_queue
 struct mutex
 {
     struct thread_entry *queue; /* waiter list */
-    int count;                  /* lock owner recursion count */
+    int recursion;              /* lock owner recursion count */
 #ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker blocker;     /* priority inheritance info
                                    for waiters */
     bool no_preempt;            /* don't allow higher-priority thread
                                    to be scheduled even if woken */
 #else
-    struct thread_entry *thread;
+    struct thread_entry *thread; /* Indicates owner thread - an owner
+                                    implies a locked state - same goes
+                                    for priority scheduling
+                                    (in blocker struct for that) */
 #endif
     IF_COP( struct corelock cl; ) /* multiprocessor sync */
-    bool locked;                /* locked semaphore */
 };
 
 #ifdef HAVE_SEMAPHORE_OBJECTS
@@ -265,10 +267,17 @@ extern void mutex_init(struct mutex *m);
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
 #ifdef HAVE_PRIORITY_SCHEDULING
-/* Temporary function to disable mutex preempting a thread on unlock */
+/* Deprecated temporary function to disable mutex preempting a thread on
+ * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
+ * reliance on it is a bug! */
 static inline void mutex_set_preempt(struct mutex *m, bool preempt)
     { m->no_preempt = !preempt; }
-#endif
+#else
+/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
+static inline bool mutex_test(const struct mutex *m)
+    { return m->thread != NULL; }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
 #ifdef HAVE_SEMAPHORE_OBJECTS
 extern void semaphore_init(struct semaphore *s, int max, int start);
 extern void semaphore_wait(struct semaphore *s);
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 9d72a7eeda..aaa675241b 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -827,9 +827,8 @@ int queue_broadcast(long id, intptr_t data)
  * Simple mutex functions ;)
  ****************************************************************************/
 
-static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
-    __attribute__((always_inline));
-static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
+static inline void __attribute__((always_inline))
+mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
 {
 #ifdef HAVE_PRIORITY_SCHEDULING
     mtx->blocker.thread = td;
@@ -838,9 +837,8 @@ static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
 #endif
 }
 
-static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
-    __attribute__((always_inline));
-static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
+static inline struct thread_entry * __attribute__((always_inline))
+mutex_get_thread(volatile struct mutex *mtx)
 {
 #ifdef HAVE_PRIORITY_SCHEDULING
     return mtx->blocker.thread;
@@ -855,8 +853,7 @@ void mutex_init(struct mutex *m)
 {
     corelock_init(&m->cl);
     m->queue = NULL;
-    m->count = 0;
-    m->locked = false;
+    m->recursion = 0;
     mutex_set_thread(m, NULL);
 #ifdef HAVE_PRIORITY_SCHEDULING
     m->blocker.priority = PRIORITY_IDLE;
@@ -873,18 +870,18 @@ void mutex_lock(struct mutex *m)
     if(current == mutex_get_thread(m))
     {
         /* current thread already owns this mutex */
-        m->count++;
+        m->recursion++;
         return;
     }
 
     /* lock out other cores */
     corelock_lock(&m->cl);
 
-    if(LIKELY(!m->locked))
+    /* must read thread again inside cs (a multiprocessor concern really) */
+    if(LIKELY(mutex_get_thread(m) == NULL))
     {
         /* lock is open */
         mutex_set_thread(m, current);
-        m->locked = true;
         corelock_unlock(&m->cl);
         return;
     }
@@ -912,10 +909,10 @@ void mutex_unlock(struct mutex *m)
                   mutex_get_thread(m)->name,
                   thread_id_entry(THREAD_ID_CURRENT)->name);
 
-    if(m->count > 0)
+    if(m->recursion > 0)
     {
         /* this thread still owns lock */
-        m->count--;
+        m->recursion--;
         return;
     }
 
@@ -927,7 +924,6 @@ void mutex_unlock(struct mutex *m)
     {
         /* no threads waiting - open the lock */
         mutex_set_thread(m, NULL);
-        m->locked = false;
         corelock_unlock(&m->cl);
         return;
     }
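For completeness, a hedged usage sketch of the public API after this change, assuming a build without HAVE_PRIORITY_SCHEDULING (mutex_test() is only defined there). The function and variable names (example, example_mutex) are made up for illustration; the point is that 'recursion' counts reentry, so a thread must unlock as many times as it locked before the mutex reads as free.

#include "kernel.h"   /* firmware/export/kernel.h */

static struct mutex example_mutex;

static void example(void)
{
    mutex_init(&example_mutex);

    mutex_lock(&example_mutex);     /* owner set, recursion == 0 */
    mutex_lock(&example_mutex);     /* same thread again: recursion == 1 */

    mutex_unlock(&example_mutex);   /* recursion back to 0, still owned */
    /* mutex_test(&example_mutex) still returns true here */

    mutex_unlock(&example_mutex);   /* owner cleared - mutex_test() now false */
}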