summaryrefslogtreecommitdiff
path: root/firmware/kernel.c
diff options
context:
space:
mode:
author: Michael Sevakis <jethead71@rockbox.org> 2010-12-26 05:59:39 +0000
committer: Michael Sevakis <jethead71@rockbox.org> 2010-12-26 05:59:39 +0000
commit: f387cdef2131b2a0956ee8e4ff7221d3251b8f46 (patch)
tree: 00b9226241ecb83e8c92798241bb14ebcf5b6fab /firmware/kernel.c
parent: 1d460b603fded79cb879704a4d30a50318263211 (diff)
download: rockbox-f387cdef2131b2a0956ee8e4ff7221d3251b8f46.tar.gz
          rockbox-f387cdef2131b2a0956ee8e4ff7221d3251b8f46.zip
Make mutexes a tiny bit leaner. There is no need for a separate locked semaphore since having an owning thread also indicates that it is locked. Rename member 'count' to 'recursion' since it counts reentry, not locks. Change presents no compatibility issues for plugins or codecs because the structure size goes down.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28901 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c | 24
1 file changed, 10 insertions, 14 deletions
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 9d72a7eeda..aaa675241b 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -827,9 +827,8 @@ int queue_broadcast(long id, intptr_t data)
827 * Simple mutex functions ;) 827 * Simple mutex functions ;)
828 ****************************************************************************/ 828 ****************************************************************************/
829 829
830static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td) 830static inline void __attribute__((always_inline))
831 __attribute__((always_inline)); 831mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
832static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
833{ 832{
834#ifdef HAVE_PRIORITY_SCHEDULING 833#ifdef HAVE_PRIORITY_SCHEDULING
835 mtx->blocker.thread = td; 834 mtx->blocker.thread = td;
@@ -838,9 +837,8 @@ static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
838#endif 837#endif
839} 838}
840 839
841static inline struct thread_entry* mutex_get_thread(struct mutex *mtx) 840static inline struct thread_entry * __attribute__((always_inline))
842 __attribute__((always_inline)); 841mutex_get_thread(volatile struct mutex *mtx)
843static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
844{ 842{
845#ifdef HAVE_PRIORITY_SCHEDULING 843#ifdef HAVE_PRIORITY_SCHEDULING
846 return mtx->blocker.thread; 844 return mtx->blocker.thread;
@@ -855,8 +853,7 @@ void mutex_init(struct mutex *m)
855{ 853{
856 corelock_init(&m->cl); 854 corelock_init(&m->cl);
857 m->queue = NULL; 855 m->queue = NULL;
858 m->count = 0; 856 m->recursion = 0;
859 m->locked = false;
860 mutex_set_thread(m, NULL); 857 mutex_set_thread(m, NULL);
861#ifdef HAVE_PRIORITY_SCHEDULING 858#ifdef HAVE_PRIORITY_SCHEDULING
862 m->blocker.priority = PRIORITY_IDLE; 859 m->blocker.priority = PRIORITY_IDLE;
@@ -873,18 +870,18 @@ void mutex_lock(struct mutex *m)
873 if(current == mutex_get_thread(m)) 870 if(current == mutex_get_thread(m))
874 { 871 {
875 /* current thread already owns this mutex */ 872 /* current thread already owns this mutex */
876 m->count++; 873 m->recursion++;
877 return; 874 return;
878 } 875 }
879 876
880 /* lock out other cores */ 877 /* lock out other cores */
881 corelock_lock(&m->cl); 878 corelock_lock(&m->cl);
882 879
883 if(LIKELY(!m->locked)) 880 /* must read thread again inside cs (a multiprocessor concern really) */
881 if(LIKELY(mutex_get_thread(m) == NULL))
884 { 882 {
885 /* lock is open */ 883 /* lock is open */
886 mutex_set_thread(m, current); 884 mutex_set_thread(m, current);
887 m->locked = true;
888 corelock_unlock(&m->cl); 885 corelock_unlock(&m->cl);
889 return; 886 return;
890 } 887 }
@@ -912,10 +909,10 @@ void mutex_unlock(struct mutex *m)
912 mutex_get_thread(m)->name, 909 mutex_get_thread(m)->name,
913 thread_id_entry(THREAD_ID_CURRENT)->name); 910 thread_id_entry(THREAD_ID_CURRENT)->name);
914 911
915 if(m->count > 0) 912 if(m->recursion > 0)
916 { 913 {
917 /* this thread still owns lock */ 914 /* this thread still owns lock */
918 m->count--; 915 m->recursion--;
919 return; 916 return;
920 } 917 }
921 918
@@ -927,7 +924,6 @@ void mutex_unlock(struct mutex *m)
927 { 924 {
928 /* no threads waiting - open the lock */ 925 /* no threads waiting - open the lock */
929 mutex_set_thread(m, NULL); 926 mutex_set_thread(m, NULL);
930 m->locked = false;
931 corelock_unlock(&m->cl); 927 corelock_unlock(&m->cl);
932 return; 928 return;
933 } 929 }