From f387cdef2131b2a0956ee8e4ff7221d3251b8f46 Mon Sep 17 00:00:00 2001
From: Michael Sevakis
Date: Sun, 26 Dec 2010 05:59:39 +0000
Subject: Make mutexes a tiny bit leaner.

There is no need for a separate locked semaphore since having an owning
thread also indicates that it is locked. Rename member 'count' to
'recursion' since it counts reentry, not locks. Change presents no
compatibility issues for plugins or codecs because the structure size
goes down.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@28901 a1c6a512-1295-4272-9138-f99709370657
---
 firmware/kernel.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

(limited to 'firmware/kernel.c')

diff --git a/firmware/kernel.c b/firmware/kernel.c
index 9d72a7eeda..aaa675241b 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -827,9 +827,8 @@ int queue_broadcast(long id, intptr_t data)
  * Simple mutex functions ;)
  ****************************************************************************/
 
-static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
-    __attribute__((always_inline));
-static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
+static inline void __attribute__((always_inline))
+mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
 {
 #ifdef HAVE_PRIORITY_SCHEDULING
     mtx->blocker.thread = td;
@@ -838,9 +837,8 @@ static inline void mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
 #endif
 }
 
-static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
-    __attribute__((always_inline));
-static inline struct thread_entry* mutex_get_thread(struct mutex *mtx)
+static inline struct thread_entry * __attribute__((always_inline))
+mutex_get_thread(volatile struct mutex *mtx)
 {
 #ifdef HAVE_PRIORITY_SCHEDULING
     return mtx->blocker.thread;
@@ -855,8 +853,7 @@ void mutex_init(struct mutex *m)
 {
     corelock_init(&m->cl);
     m->queue = NULL;
-    m->count = 0;
-    m->locked = false;
+    m->recursion = 0;
     mutex_set_thread(m, NULL);
 #ifdef HAVE_PRIORITY_SCHEDULING
     m->blocker.priority = PRIORITY_IDLE;
@@ -873,18 +870,18 @@ void mutex_lock(struct mutex *m)
     if(current == mutex_get_thread(m))
     {
         /* current thread already owns this mutex */
-        m->count++;
+        m->recursion++;
         return;
     }
 
     /* lock out other cores */
     corelock_lock(&m->cl);
 
-    if(LIKELY(!m->locked))
+    /* must read thread again inside cs (a multiprocessor concern really) */
+    if(LIKELY(mutex_get_thread(m) == NULL))
     {
         /* lock is open */
         mutex_set_thread(m, current);
-        m->locked = true;
         corelock_unlock(&m->cl);
         return;
     }
@@ -912,10 +909,10 @@ void mutex_unlock(struct mutex *m)
                   mutex_get_thread(m)->name,
                   thread_id_entry(THREAD_ID_CURRENT)->name);
 
-    if(m->count > 0)
+    if(m->recursion > 0)
     {
         /* this thread still owns lock */
-        m->count--;
+        m->recursion--;
         return;
     }
 
@@ -927,7 +924,6 @@ void mutex_unlock(struct mutex *m)
     {
         /* no threads waiting - open the lock */
         mutex_set_thread(m, NULL);
-        m->locked = false;
         corelock_unlock(&m->cl);
         return;
     }
-- 
cgit v1.2.3
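
For context, the pattern the patch converges on - a mutex is locked exactly when it has a
non-NULL owner, with 'recursion' counting nested acquisitions by that owner - can be sketched
outside the kernel as below. This is a minimal sketch, not Rockbox code: the sketch_* names and
struct layout are hypothetical, a C11 spinlock stands in for the kernel corelock, and the real
kernel blocks waiters on m->queue (and handles priority inheritance) instead of spinning.

/* Sketch (hypothetical, not Rockbox code): owner pointer doubles as the
 * lock flag, 'recursion' counts re-entry by the owning thread. */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct thread_entry { const char *name; };   /* stand-in for a kernel thread */

struct sketch_mutex
{
    atomic_flag cl;                     /* SMP guard (corelock stand-in)     */
    struct thread_entry *_Atomic owner; /* NULL <=> mutex is open            */
    unsigned int recursion;             /* nested locks held by the owner    */
};

static void sketch_mutex_init(struct sketch_mutex *m)
{
    atomic_flag_clear(&m->cl);          /* guard starts released             */
    atomic_store(&m->owner, NULL);      /* no owner: mutex is open           */
    m->recursion = 0;
}

static void sketch_mutex_lock(struct sketch_mutex *m, struct thread_entry *current)
{
    if(atomic_load(&m->owner) == current)
    {
        m->recursion++;                 /* already ours: just count re-entry */
        return;
    }

    for(;;)
    {
        while(atomic_flag_test_and_set(&m->cl))
            ;                           /* short critical section guard      */

        /* re-check the owner inside the critical section, as the patch notes */
        if(atomic_load(&m->owner) == NULL)
        {
            atomic_store(&m->owner, current);  /* taking ownership = locking */
            atomic_flag_clear(&m->cl);
            return;
        }

        atomic_flag_clear(&m->cl);
        /* the real kernel enqueues and blocks here instead of retrying      */
    }
}

static void sketch_mutex_unlock(struct sketch_mutex *m, struct thread_entry *current)
{
    assert(atomic_load(&m->owner) == current); /* unlocker must be the owner */

    if(m->recursion > 0)
    {
        m->recursion--;                 /* still held at an outer level      */
        return;
    }

    while(atomic_flag_test_and_set(&m->cl))
        ;
    atomic_store(&m->owner, NULL);      /* dropping ownership = unlocking    */
    atomic_flag_clear(&m->cl);
    /* real code hands ownership to the first queued waiter, if any          */
}

int main(void)
{
    struct thread_entry me = { "main" };
    struct sketch_mutex m;

    sketch_mutex_init(&m);
    sketch_mutex_lock(&m, &me);         /* first acquisition: owner = &me    */
    sketch_mutex_lock(&m, &me);         /* recursive acquisition: recursion=1 */
    sketch_mutex_unlock(&m, &me);       /* recursion back to 0, still owned  */
    sketch_mutex_unlock(&m, &me);       /* owner cleared: mutex is open      */
    assert(atomic_load(&m.owner) == NULL);
    printf("recursive lock/unlock ok\n");
    return 0;
}

Because the open state is simply owner == NULL, the old boolean 'locked' member carried no extra
information; removing it shrinks struct mutex, which is why the commit message can note that
plugin and codec compatibility is unaffected.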