author    Michael Sevakis <jethead71@rockbox.org>    2014-04-24 04:09:18 -0400
committer Michael Sevakis <jethead71@rockbox.org>    2014-08-06 02:47:47 +0200
commit    533d396761b630e372166f6f0522ba1c2d128d70
tree      823a5f800049f62d4ea9f573b4cdeb3e7ff9b3e1 /firmware
parent    6536f1db3eedf0a12d16c5504cba94725eb6500d
Add multi-reader, single-writer locks to kernel.
Any number of readers may be in the critical section at a time and writers
are mutually exclusive to all other threads. They are a better choice when
data is rarely modified but often read, and multiple threads can safely
access it for reading.

Priority inheritance is fully implemented, along with other changes to the
kernel to fully support it on multiowner objects. This also cleans up
priority code in the kernel and updates some associated structures in
existing objects to the cleaner form.

Currently doesn't add the mrsw_lock.[ch] files since they're not yet needed
by anything, but the supporting improvements are still useful. This
includes a typed bitarray API (bitarray.h) which is pretty basic for now.

Change-Id: Idbe43dcd9170358e06d48d00f1c69728ff45b0e3
Reviewed-on: http://gerrit.rockbox.org/801
Reviewed-by: Michael Sevakis <jethead71@rockbox.org>
Tested: Michael Sevakis <jethead71@rockbox.org>
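As a usage sketch of the reader/writer semantics described above — hypothetical, since mrsw_lock.[ch] is not added by this commit and the eventual API may differ:

    /* Illustrative only: mrsw_lock.h and these function names are not
     * part of this commit. */
    static struct mrsw_lock cfg_lock;

    void reader(void)
    {
        mrsw_read_acquire(&cfg_lock);   /* any number of readers at once */
        /* ... read the shared data ... */
        mrsw_read_release(&cfg_lock);
    }

    void writer(void)
    {
        mrsw_write_acquire(&cfg_lock);  /* excludes readers and writers */
        /* ... modify the shared data ... */
        mrsw_write_release(&cfg_lock);
    }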
Diffstat (limited to 'firmware')
-rw-r--r--  firmware/SOURCES                         |    3
-rw-r--r--  firmware/include/bitarray.h              |  231
-rw-r--r--  firmware/kernel/include/kernel.h         |    1
-rw-r--r--  firmware/kernel/include/mutex.h          |   20
-rw-r--r--  firmware/kernel/include/queue.h          |    2
-rw-r--r--  firmware/kernel/include/thread.h         |   81
-rw-r--r--  firmware/kernel/mutex.c                  |   65
-rw-r--r--  firmware/kernel/queue.c                  |   21
-rw-r--r--  firmware/kernel/semaphore.c              |    8
-rw-r--r--  firmware/kernel/thread.c                 |  961
-rw-r--r--  firmware/target/hosted/sdl/thread-sdl.c  |   34
11 files changed, 853 insertions, 574 deletions
diff --git a/firmware/SOURCES b/firmware/SOURCES
index 5e37892efe..584254a666 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -1825,6 +1825,9 @@ drivers/touchpad.c
 #ifdef HAVE_CORELOCK_OBJECT
 kernel/corelock.c
 #endif
+#if 0 /* pending dependent code */
+kernel/mrsw_lock.c
+#endif
 kernel/mutex.c
 kernel/queue.c
 #ifdef HAVE_SEMAPHORE_OBJECTS
diff --git a/firmware/include/bitarray.h b/firmware/include/bitarray.h
new file mode 100644
index 0000000000..4777ccb6a4
--- /dev/null
+++ b/firmware/include/bitarray.h
@@ -0,0 +1,231 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2014 by Michael Sevakis
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#ifndef BITARRAY_H
+#define BITARRAY_H
+
+/* Type-checked bit array definitions */
+
+/* All this stuff gets optimized into very simple object code */
+
+#define BITARRAY_WORD_BITS \
+    (sizeof (unsigned int) * 8)
+#define BITARRAY_NWORDS(bits) \
+    (((bits) + BITARRAY_WORD_BITS - 1) / BITARRAY_WORD_BITS)
+#define BITARRAY_BITWORD(bitnum) \
+    ((bitnum) / BITARRAY_WORD_BITS)
+#define BITARRAY_WORDBIT(bitnum) \
+    ((bitnum) % BITARRAY_WORD_BITS)
+#define BITARRAY_NBIT(word, bit) \
+    ((word)*BITARRAY_WORD_BITS + (bit))
+#define BITARRAY_BITS(bits) \
+    (BITARRAY_NWORDS(bits)*BITARRAY_WORD_BITS)
+#define BITARRAY_BITN(bitnum) \
+    (1u << BITARRAY_WORDBIT(bitnum))
+
+
+/** Iterators **/
+#include "config.h"
+#include <stdint.h>
+
+#if (defined(CPU_ARM) && ARM_ARCH >= 5) || UINT32_MAX < UINT_MAX
+#define __BITARRAY_CTZ(wval) __builtin_ctz(wval)
+#else
+#include "system.h"
+#define __BITARRAY_CTZ(wval) find_first_set_bit(wval)
+#endif
+#define __BITARRAY_POPCNT(wval) __builtin_popcount(wval)
+
+#ifndef BIT_N
+#define BIT_N(n) (1u << (n))
+#endif
+
+/* Enumerate each word index */
+#define FOR_EACH_BITARRAY_WORD_INDEX(nwords, index) \
+    for (unsigned int index = 0, _nwords = (nwords); \
+         index < _nwords; index++)
+
+/* Enumerate each word value */
+#define FOR_EACH_BITARRAY_WORD(a, wval) \
+    FOR_EACH_BITARRAY_WORD_INDEX(ARRAYLEN((a)->words), _w) \
+        for (unsigned int wval = (a)->words[_w], _ = 1; _; _--)
+
+/* Enumerate the bit number of each set bit of a word in sequence */
+#define FOR_EACH_BITARRAY_SET_WORD_BIT(wval, bit) \
+    for (unsigned int _wval = (wval), bit; \
+         _wval ? (((bit) = __BITARRAY_CTZ(_wval)), 1) : 0; \
+         _wval &= ~BIT_N(bit))
+
+/* Enumerate the bit number of each set bit in the bit array in sequence */
+#define FOR_EACH_BITARRAY_SET_BIT_ARR(nwords, words, nbit) \
+    FOR_EACH_BITARRAY_WORD_INDEX(nwords, _w) \
+        FOR_EACH_BITARRAY_SET_WORD_BIT(words[_w], _bit) \
+            for (unsigned int nbit = BITARRAY_NBIT(_w, _bit), _ = 1; _; _--)
+
+/* As above but takes an array type for an argument */
+#define FOR_EACH_BITARRAY_SET_BIT(a, nbit) \
+    FOR_EACH_BITARRAY_SET_BIT_ARR(ARRAYLEN((a)->words), (a)->words, nbit)
+
+
+/** Base functions (called by typed functions) **/
+
+/* Return the word associated with the bit */
+static inline unsigned int
+__bitarray_get_word(unsigned int words[], unsigned int bitnum)
+{
+    return words[BITARRAY_BITWORD(bitnum)];
+}
+
+/* Set the word associated with the bit */
+static inline void
+__bitarray_set_word(unsigned int words[], unsigned int bitnum,
+                    unsigned int wordval)
+{
+    words[BITARRAY_BITWORD(bitnum)] = wordval;
+}
+
+/* Set the bit at index 'bitnum' to '1' */
+static inline void
+__bitarray_set_bit(unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int bit  = BITARRAY_BITN(bitnum);
+    words[word] |= bit;
+}
+
+/* Set the bit at index 'bitnum' to '0' */
+static inline void
+__bitarray_clear_bit(unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int bit  = BITARRAY_BITN(bitnum);
+    words[word] &= ~bit;
+}
+
+/* Return the value of the specified bit ('0' or '1') */
+static inline unsigned int
+__bitarray_test_bit(const unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int nbit = BITARRAY_WORDBIT(bitnum);
+    return (words[word] >> nbit) & 1u;
+}
+
+/* Check if all bits in the bit array are '0' */
+static inline bool
+__bitarray_is_clear(const unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+    {
+        if (words[word] != 0)
+            return false;
+    }
+
+    return true;
+}
+
+/* Set every bit in the array to '0' */
+static inline void
+__bitarray_clear(unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        words[word] = 0;
+}
+
+/* Set every bit in the array to '1' */
+static inline void
+__bitarray_set(unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        words[word] = ~0u;
+}
+
+/* Find the lowest-indexed '1' bit in the bit array, returning the size of
+   the array if none are set */
+static inline unsigned int
+__bitarray_ffs(const unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_SET_BIT_ARR(BITARRAY_NWORDS(nbits), words, nbit)
+        return nbit;
+
+    return BITARRAY_BITS(nbits);
+}
+
+/* Return the number of bits currently set to '1' in the bit array */
+static inline unsigned int
+__bitarray_popcount(const unsigned int words[], unsigned int nbits)
+{
+    unsigned int count = 0;
+
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        count += __BITARRAY_POPCNT(words[word]);
+
+    return count;
+}
+
+/**
+ * Giant macro to define all the typed functions
+ *   typename : The name of the type (e.g. myarr_t myarr;)
+ *   fnprefix : The prefix all functions get (e.g. myarr_set_bit)
+ *   nbits    : The minimum number of bits the array is meant to hold
+ *              (the implementation rounds this up to the word size
+ *               and all words may be fully utilized)
+ *
+ * uses 'typedef' to freely change from, e.g., struct to union without
+ * changing source code
+ */
+#define BITARRAY_TYPE_DECLARE(typename, fnprefix, nbits) \
+typedef struct \
+{ \
+    unsigned int words[BITARRAY_NWORDS(nbits)]; \
+} typename; \
+static inline unsigned int \
+fnprefix##_get_word(typename *array, unsigned int bitnum) \
+    { return __bitarray_get_word(array->words, bitnum); } \
+static inline void \
+fnprefix##_set_word(typename *array, unsigned int bitnum, \
+                    unsigned int wordval) \
+    { __bitarray_set_word(array->words, bitnum, wordval); } \
+static inline void \
+fnprefix##_set_bit(typename *array, unsigned int bitnum) \
+    { __bitarray_set_bit(array->words, bitnum); } \
+static inline void \
+fnprefix##_clear_bit(typename *array, unsigned int bitnum) \
+    { __bitarray_clear_bit(array->words, bitnum); } \
+static inline unsigned int \
+fnprefix##_test_bit(const typename *array, unsigned int bitnum) \
+    { return __bitarray_test_bit(array->words, bitnum); } \
+static inline bool \
+fnprefix##_is_clear(const typename *array) \
+    { return __bitarray_is_clear(array->words, nbits); } \
+static inline void \
+fnprefix##_clear(typename *array) \
+    { __bitarray_clear(array->words, nbits); } \
+static inline void \
+fnprefix##_set(typename *array) \
+    { __bitarray_set(array->words, nbits); } \
+static inline unsigned int \
+fnprefix##_ffs(const typename *array) \
+    { return __bitarray_ffs(array->words, nbits); } \
+static inline unsigned int \
+fnprefix##_popcount(const typename *array) \
+    { return __bitarray_popcount(array->words, nbits); }
+
+#endif /* BITARRAY_H */
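As a usage sketch of the typed API above (the type and prefix names below are illustrative, not from this commit), BITARRAY_TYPE_DECLARE generates a struct type plus a full set of type-checked inline accessors:

    #include "bitarray.h"

    /* Defines mybits_t and mybits_set_bit(), mybits_clear_bit(),
     * mybits_clear(), mybits_ffs(), mybits_popcount(), etc.
     * 40 bits round up to two words on a 32-bit unsigned int. */
    BITARRAY_TYPE_DECLARE(mybits_t, mybits, 40)

    static mybits_t mybits;

    void example(void)
    {
        mybits_clear(&mybits);                     /* every bit to '0' */
        mybits_set_bit(&mybits, 37);
        unsigned int first = mybits_ffs(&mybits);  /* -> 37 */

        FOR_EACH_BITARRAY_SET_BIT(&mybits, nbit)
        {
            /* visits each set bit number in ascending order */
        }
        (void)first;
    }

Because each declared type carries its own struct, passing the wrong array type to an accessor is a compile-time error rather than a silent width mismatch.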
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
index fafff25ce4..d2ffffcda9 100644
--- a/firmware/kernel/include/kernel.h
+++ b/firmware/kernel/include/kernel.h
@@ -26,6 +26,7 @@
 #include "system.h"
 #include "queue.h"
 #include "mutex.h"
+#include "mrsw_lock.h"
 #include "tick.h"
 
 #ifdef INCLUDE_TIMEOUT_API
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
index bcf5701bd9..02b85f331f 100644
--- a/firmware/kernel/include/mutex.h
+++ b/firmware/kernel/include/mutex.h
@@ -28,20 +28,14 @@
 
 struct mutex
 {
     struct thread_entry *queue;          /* waiter list */
     int recursion;                       /* lock owner recursion count */
+    struct blocker blocker;              /* priority inheritance info
+                                            for waiters and owner*/
+    IF_COP( struct corelock cl; )        /* multiprocessor sync */
 #ifdef HAVE_PRIORITY_SCHEDULING
-    struct blocker blocker;              /* priority inheritance info
-                                            for waiters */
-    bool no_preempt;                     /* don't allow higher-priority thread
-                                            to be scheduled even if woken */
-#else
-    struct thread_entry *thread;         /* Indicates owner thread - an owner
-                                            implies a locked state - same goes
-                                            for priority scheduling
-                                            (in blocker struct for that) */
+    bool no_preempt;
 #endif
-    IF_COP( struct corelock cl; )        /* multiprocessor sync */
 };
 
 extern void mutex_init(struct mutex *m);
@@ -56,7 +50,7 @@ static inline void mutex_set_preempt(struct mutex *m, bool preempt)
 #else
 /* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
 static inline bool mutex_test(const struct mutex *m)
-    { return m->thread != NULL; }
+    { return m->blocker.thread != NULL; }
 #endif /* HAVE_PRIORITY_SCHEDULING */
 
 #endif /* MUTEX_H */
diff --git a/firmware/kernel/include/queue.h b/firmware/kernel/include/queue.h
index 1b404f8297..3f24598d5b 100644
--- a/firmware/kernel/include/queue.h
+++ b/firmware/kernel/include/queue.h
@@ -143,6 +143,8 @@ extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
 #define QPEEK_FILTER_COUNT_MASK (0xffu) /* 0x00=1 filter, 0xff=256 filters */
 #define QPEEK_FILTER_HEAD_ONLY  (1u << 8) /* Ignored if no filters */
 #define QPEEK_REMOVE_EVENTS     (1u << 9) /* Remove or discard events */
+#define QPEEK_FILTER1(a)        QPEEK_FILTER2((a), (a))
+#define QPEEK_FILTER2(a, b)     (&(const long [2]){ (a), (b) })
 extern bool queue_peek_ex(struct event_queue *q,
                           struct queue_event *ev,
                           unsigned int flags,
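A note on the new QPEEK_FILTER helpers: they build a two-entry event-id filter range in place as a C99 compound literal, so callers can pass a filter to queue_peek_ex() without declaring a named array. A sketch of the expansion (the event ids shown are examples):

    /* QPEEK_FILTER2(a, b) yields a pointer to a const two-element range;
     * QPEEK_FILTER1(a) is the degenerate single-id case. */
    const long (*range)[2] = QPEEK_FILTER2(SYS_TIMEOUT, SYS_USB_CONNECTED);
    const long (*one)[2]   = QPEEK_FILTER1(SYS_TIMEOUT);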
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
index 8c13b462e6..f181f867cb 100644
--- a/firmware/kernel/include/thread.h
+++ b/firmware/kernel/include/thread.h
@@ -28,6 +28,7 @@
 #include <stdbool.h>
 #include "gcc_extensions.h"
 #include "corelock.h"
+#include "bitarray.h"
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than lower priority threads
@@ -80,6 +81,10 @@
 #endif
 
 #define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
+
+BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS)
+BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
+
 /*
  * We need more stack when we run under a host
  * maybe more expensive C lib functions?
@@ -134,32 +139,39 @@ struct thread_list
     struct thread_entry *next; /* Next thread in a list */
 };
 
-#ifdef HAVE_PRIORITY_SCHEDULING
+/* Basic structure describing the owner of an object */
 struct blocker
 {
     struct thread_entry * volatile thread; /* thread blocking other threads
                                                (aka. object owner) */
+#ifdef HAVE_PRIORITY_SCHEDULING
     int priority;                           /* highest priority waiter */
-    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+#endif
 };
 
-/* Choices of wakeup protocol */
-
-/* For transfer of object ownership by one thread to another thread by
- * the owning thread itself (mutexes) */
-struct thread_entry *
-    wakeup_priority_protocol_transfer(struct thread_entry *thread);
+/* If a thread has a blocker but the blocker's registered thread is NULL,
+   then it references this and the struct blocker pointer may be
+   reinterpreted as such. */
+struct blocker_splay
+{
+    struct blocker blocker;             /* blocker info (first!) */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    threadbit_t mask;                   /* mask of nonzero tcounts */
+#if NUM_CORES > 1
+    struct corelock cl;                 /* mutual exclusion */
+#endif
+#endif /* HAVE_PRIORITY_SCHEDULING */
+};
 
-/* For release by owner where ownership doesn't change - other threads,
- * interrupts, timeouts, etc. (mutex timeout, queues) */
-struct thread_entry *
-    wakeup_priority_protocol_release(struct thread_entry *thread);
+#ifdef HAVE_PRIORITY_SCHEDULING
 
+/* Quick-disinherit of priority elevation. Must be a running thread. */
+void priority_disinherit(struct thread_entry *thread, struct blocker *bl);
 
 struct priority_distribution
 {
     uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
-    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
+    priobit_t mask;                 /* Bitmask of hist entries that are not zero */
 };
 
 #endif /* HAVE_PRIORITY_SCHEDULING */
@@ -210,6 +222,7 @@ struct thread_entry
     volatile intptr_t retval;  /* Return value from a blocked operation/
                                   misc. use */
 #endif
+    uint32_t id;               /* Current slot id */
     int __errno;               /* Thread error number (errno tls) */
 #ifdef HAVE_PRIORITY_SCHEDULING
     /* Priority summary of owned objects that support inheritance */
@@ -226,7 +239,6 @@ struct thread_entry
     unsigned char priority;    /* Scheduled priority (higher of base or
                                   all threads blocked by this one) */
 #endif
-    uint16_t id;               /* Current slot id */
     unsigned short stack_size; /* Size of stack in bytes */
     unsigned char state;       /* Thread slot state (STATE_*) */
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -238,11 +250,12 @@ struct thread_entry
 };
 
 /*** Macros for internal use ***/
-/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
+/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
 #define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK  0xff00
-#define THREAD_ID_SLOT_MASK     0x00ff
+#define THREAD_ID_VERSION_MASK  0xffffff00
+#define THREAD_ID_SLOT_MASK     0x000000ff
 #define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
+#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)
 
 #ifdef HAVE_CORELOCK_OBJECT
 /* Operations to be performed just before stopping a thread and starting
@@ -337,11 +350,8 @@ void switch_thread(void);
 /* Blocks a thread for at least the specified number of ticks (0 = wait until
  * next tick) */
 void sleep_thread(int ticks);
-/* Indefinitely blocks the current thread on a thread queue */
-void block_thread(struct thread_entry *current);
-/* Blocks the current thread on a thread queue until explicitely woken or
- * the timeout is reached */
-void block_thread_w_tmo(struct thread_entry *current, int timeout);
+/* Blocks the current thread on a thread queue (< 0 == infinite) */
+void block_thread(struct thread_entry *current, int timeout);
 
 /* Return bit flags for thread wakeup */
 #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@@ -350,15 +360,32 @@ void block_thread_w_tmo(struct thread_entry *current, int timeout);
                    higher priority than current were woken) */
 
 /* A convenience function for waking an entire queue of threads. */
-unsigned int thread_queue_wake(struct thread_entry **list);
+unsigned int thread_queue_wake(struct thread_entry **list,
+                               volatile int *count);
 
 /* Wakeup a thread at the head of a list */
-unsigned int wakeup_thread(struct thread_entry **list);
+enum wakeup_thread_protocol
+{
+    WAKEUP_DEFAULT,
+    WAKEUP_TRANSFER,
+    WAKEUP_RELEASE,
+    WAKEUP_TRANSFER_MULTI,
+};
+
+unsigned int wakeup_thread_(struct thread_entry **list
+                            IF_PRIO(, enum wakeup_thread_protocol proto));
 
 #ifdef HAVE_PRIORITY_SCHEDULING
+#define wakeup_thread(list, proto) \
+    wakeup_thread_((list), (proto))
+
 int thread_set_priority(unsigned int thread_id, int priority);
 int thread_get_priority(unsigned int thread_id);
+#else /* !HAVE_PRIORITY_SCHEDULING */
+#define wakeup_thread(list, proto...) \
+    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */
+
 #ifdef HAVE_IO_PRIORITY
 void thread_set_io_priority(unsigned int thread_id, int io_priority);
 int thread_get_io_priority(unsigned int thread_id);
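A quick arithmetic illustration of the widened thread ID (using only the macros above): the slot stays in the low byte while the version now occupies the upper 24 bits, so a slot must be reused roughly 2^24 times before a stale ID can collide, versus 2^8 with the old 16-bit ID.

    uint32_t id   = THREAD_ID_INIT(5);           /* 0x00000105: version 1, slot 5 */
    unsigned slot = THREAD_ID_SLOT(id);          /* -> 5 */
    uint32_t ver  = id & THREAD_ID_VERSION_MASK; /* -> 0x00000100 */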
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index f1e4b3c722..2e90b0f4b1 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -27,31 +27,10 @@
 #include <stdbool.h>
 #include "config.h"
 #include "system.h"
-#include "mutex.h"
-#include "corelock.h"
+#include "kernel.h"
 #include "thread-internal.h"
 #include "kernel-internal.h"
 
-static inline void __attribute__((always_inline))
-mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    mtx->blocker.thread = td;
-#else
-    mtx->thread = td;
-#endif
-}
-
-static inline struct thread_entry * __attribute__((always_inline))
-mutex_get_thread(volatile struct mutex *mtx)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    return mtx->blocker.thread;
-#else
-    return mtx->thread;
-#endif
-}
-
 /* Initialize a mutex object - call before any use and do not call again once
  * the object is available to other threads */
 void mutex_init(struct mutex *m)
@@ -59,10 +38,9 @@ void mutex_init(struct mutex *m)
     corelock_init(&m->cl);
     m->queue = NULL;
     m->recursion = 0;
-    mutex_set_thread(m, NULL);
+    m->blocker.thread = NULL;
 #ifdef HAVE_PRIORITY_SCHEDULING
     m->blocker.priority = PRIORITY_IDLE;
-    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
     m->no_preempt = false;
 #endif
 }
@@ -72,7 +50,7 @@ void mutex_lock(struct mutex *m)
 {
     struct thread_entry *current = thread_self_entry();
 
-    if(current == mutex_get_thread(m))
+    if(current == m->blocker.thread)
     {
         /* current thread already owns this mutex */
         m->recursion++;
@@ -83,10 +61,10 @@ void mutex_lock(struct mutex *m)
     corelock_lock(&m->cl);
 
     /* must read thread again inside cs (a multiprocessor concern really) */
-    if(LIKELY(mutex_get_thread(m) == NULL))
+    if(LIKELY(m->blocker.thread == NULL))
     {
         /* lock is open */
-        mutex_set_thread(m, current);
+        m->blocker.thread = current;
         corelock_unlock(&m->cl);
         return;
     }
@@ -97,7 +75,7 @@ void mutex_lock(struct mutex *m)
     current->bqp = &m->queue;
 
     disable_irq();
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&m->cl);
 
@@ -109,9 +87,9 @@ void mutex_lock(struct mutex *m)
 void mutex_unlock(struct mutex *m)
 {
     /* unlocker not being the owner is an unlocking violation */
-    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
+    KERNEL_ASSERT(m->blocker.thread == thread_self_entry(),
                   "mutex_unlock->wrong thread (%s != %s)\n",
-                  mutex_get_thread(m)->name,
+                  m->blocker.thread->name,
                   thread_self_entry()->name);
 
     if(m->recursion > 0)
@@ -128,25 +106,24 @@ void mutex_unlock(struct mutex *m)
     if(LIKELY(m->queue == NULL))
     {
         /* no threads waiting - open the lock */
-        mutex_set_thread(m, NULL);
+        m->blocker.thread = NULL;
         corelock_unlock(&m->cl);
         return;
     }
-    else
-    {
-        const int oldlevel = disable_irq_save();
-        /* Tranfer of owning thread is handled in the wakeup protocol
-         * if priorities are enabled otherwise just set it from the
-         * queue head. */
-        IFN_PRIO( mutex_set_thread(m, m->queue); )
-        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
-        restore_irq(oldlevel);
 
-        corelock_unlock(&m->cl);
+    const int oldlevel = disable_irq_save();
+    /* Tranfer of owning thread is handled in the wakeup protocol
+     * if priorities are enabled otherwise just set it from the
+     * queue head. */
+    IFN_PRIO( m->blocker.thread = m->queue; )
+    unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
+    restore_irq(oldlevel);
+
+    corelock_unlock(&m->cl);
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-        if((result & THREAD_SWITCH) && !m->no_preempt)
-            switch_thread();
+    if((result & THREAD_SWITCH) && !m->no_preempt)
+        switch_thread();
 #endif
-    }
+    (void)result;
 }
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
index 379e3f62c8..22a8da9bd3 100644
--- a/firmware/kernel/queue.c
+++ b/firmware/kernel/queue.c
@@ -84,7 +84,7 @@ static void queue_release_sender(struct thread_entry * volatile * sender,
     *thread->bqp = thread; /* Move blocking queue head to thread since
                               wakeup_thread wakes the first thread in
                               the list. */
-    wakeup_thread(thread->bqp);
+    wakeup_thread(thread->bqp, WAKEUP_RELEASE);
 }
 
 /* Releases any waiting threads that are queued with queue_send -
@@ -108,16 +108,16 @@ static void queue_release_all_senders(struct event_queue *q)
     }
 }
 
+#ifdef HAVE_WAKEUP_EXT_CB
 /* Callback to do extra forced removal steps from sender list in addition
  * to the normal blocking queue removal and priority dis-inherit */
 static void queue_remove_sender_thread_cb(struct thread_entry *thread)
 {
     *((struct thread_entry **)thread->retval) = NULL;
-#ifdef HAVE_WAKEUP_EXT_CB
     thread->wakeup_ext_cb = NULL;
-#endif
     thread->retval = 0;
 }
+#endif /* HAVE_WAKEUP_EXT_CB */
 
 /* Enables queue_send on the specified queue - caller allocates the extra
  * data structure. Only queues which are taken to be owned by a thread should
@@ -139,7 +139,6 @@ void queue_enable_queue_send(struct event_queue *q,
     {
         memset(send, 0, sizeof(*send));
 #ifdef HAVE_PRIORITY_SCHEDULING
-        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
         send->blocker.priority = PRIORITY_IDLE;
         if(owner_id != 0)
         {
@@ -268,7 +267,7 @@ void queue_delete(struct event_queue *q)
     corelock_unlock(&all_queues.cl);
 
     /* Release thread(s) waiting on queue head */
-    thread_queue_wake(&q->queue);
+    thread_queue_wake(&q->queue, NULL);
 
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
@@ -325,7 +324,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
     IF_COP( current->obj_cl = &q->cl; )
     current->bqp = &q->queue;
 
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&q->cl);
     switch_thread();
@@ -386,7 +385,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
     IF_COP( current->obj_cl = &q->cl; )
     current->bqp = &q->queue;
 
-    block_thread_w_tmo(current, ticks);
+    block_thread(current, ticks);
     corelock_unlock(&q->cl);
 
     switch_thread();
@@ -443,7 +442,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
         queue_do_unblock_sender(q->send, wr);
 
     /* Wakeup a waiting thread if any */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -481,7 +480,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     }
 
     /* Wakeup a waiting thread if any */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
     /* Save thread in slot, add to list and wait for reply */
     *spp = current;
@@ -493,7 +492,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     current->retval = (intptr_t)spp;
     current->bqp = &send->list;
 
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&q->cl);
     switch_thread();
@@ -502,7 +501,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     }
 
     /* Function as queue_post if sending is not enabled */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);
 
     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c
index f9ff0ad987..b6ce7fd742 100644
--- a/firmware/kernel/semaphore.c
+++ b/firmware/kernel/semaphore.c
@@ -82,11 +82,7 @@ int semaphore_wait(struct semaphore *s, int timeout)
      * explicit in semaphore_release */
     current->retval = OBJ_WAIT_TIMEDOUT;
 
-    if(timeout > 0)
-        block_thread_w_tmo(current, timeout); /* ...or timed out... */
-    else
-        block_thread(current);                /* -timeout = infinite */
-
+    block_thread(current, timeout);
     corelock_unlock(&s->cl);
 
     /* ...and turn control over to next thread */
@@ -118,7 +114,7 @@ void semaphore_release(struct semaphore *s)
         KERNEL_ASSERT(s->count == 0,
             "semaphore_release->threads queued but count=%d!\n", s->count);
         s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
-        result = wakeup_thread(&s->queue);
+        result = wakeup_thread(&s->queue, WAKEUP_DEFAULT);
     }
     else
     {
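With the timeout folded into block_thread() (negative means block indefinitely, per the thread.h comment above), the caller-visible semaphore behavior is unchanged. A usage sketch, assuming HZ-based tick counts:

    int rc = semaphore_wait(&sem, HZ);     /* give up after one second */
    if (rc == OBJ_WAIT_TIMEDOUT)
        { /* no semaphore_release() arrived in time */ }

    semaphore_wait(&sem, TIMEOUT_BLOCK);   /* wait indefinitely */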
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
index 43ff584a68..0a47f97e93 100644
--- a/firmware/kernel/thread.c
+++ b/firmware/kernel/thread.c
@@ -246,13 +246,13 @@ static void thread_stkov(struct thread_entry *thread)
         cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
 #else
 #define LOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define TRY_LOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define UNLOCK_THREAD(thread) \
-    ({ })
+    ({ (void)(thread); })
 #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
-    ({ })
+    ({ (void)(thread); })
 #endif
 
 /* RTR list */
@@ -279,6 +279,100 @@ static void thread_stkov(struct thread_entry *thread)
 #define rtr_move_entry_inl(core, from, to)
 #endif
 
+static inline void thread_store_context(struct thread_entry *thread)
+{
+#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
+    thread->__errno = errno;
+#endif
+    store_context(&thread->context);
+}
+
+static inline void thread_load_context(struct thread_entry *thread)
+{
+    load_context(&thread->context);
+#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
+    errno = thread->__errno;
+#endif
+}
+
+static inline unsigned int should_switch_tasks(void)
+{
+    unsigned int result = THREAD_OK;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct thread_entry *current = cores[CURRENT_CORE].running;
+    if (current &&
+        priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask)
+            < current->priority)
+    {
+        /* There is a thread ready to run of higher priority on the same
+         * core as the current one; recommend a task switch. */
+        result |= THREAD_SWITCH;
+    }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+    return result;
+}
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Locks the thread registered as the owner of the block and makes sure it
+ * didn't change in the meantime
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES == 1
+static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+    return bl->thread;
+}
+#else /* NUM_CORES > 1 */
+static struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+    /* The blocker thread may change during the process of trying to
+       capture it */
+    while (1)
+    {
+        struct thread_entry *t = bl->thread;
+
+        /* TRY, or else deadlocks are possible */
+        if (!t)
+        {
+            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+            if (corelock_try_lock(&blsplay->cl))
+            {
+                if (!bl->thread)
+                    return NULL; /* Still multi */
+
+                corelock_unlock(&blsplay->cl);
+            }
+        }
+        else
+        {
+            if (TRY_LOCK_THREAD(t))
+            {
+                if (bl->thread == t)
+                    return t;
+
+                UNLOCK_THREAD(t);
+            }
+        }
+    }
+}
+#endif /* NUM_CORES */
+
+static inline void unlock_blocker_thread(struct blocker *bl)
+{
+#if NUM_CORES > 1
+    struct thread_entry *blt = bl->thread;
+    if (blt)
+        UNLOCK_THREAD(blt);
+    else
+        corelock_unlock(&((struct blocker_splay *)bl)->cl);
+#endif /* NUM_CORES > 1*/
+    (void)bl;
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
 /*---------------------------------------------------------------------------
  * Thread list structure - circular:
  *    +------------------------------+
@@ -420,7 +514,6 @@ static void remove_from_list_tmo(struct thread_entry *thread)
     }
 }
 
-
 #ifdef HAVE_PRIORITY_SCHEDULING
 /*---------------------------------------------------------------------------
  * Priority distribution structure (one category for each possible priority):
@@ -476,19 +569,9 @@ static void remove_from_list_tmo(struct thread_entry *thread)
 static inline unsigned int prio_add_entry(
     struct priority_distribution *pd, int priority)
 {
-    unsigned int count;
-    /* Enough size/instruction count difference for ARM makes it worth it to
-     * use different code (192 bytes for ARM).  Only thing better is ASM. */
-#ifdef CPU_ARM
-    count = pd->hist[priority];
-    if (++count == 1)
-        pd->mask |= 1 << priority;
-    pd->hist[priority] = count;
-#else /* This one's better for Coldfire */
-    if ((count = ++pd->hist[priority]) == 1)
-        pd->mask |= 1 << priority;
-#endif
-
+    unsigned int count = ++pd->hist[priority];
+    if (count == 1)
+        priobit_set_bit(&pd->mask, priority);
     return count;
 }
 
@@ -499,18 +582,9 @@ static inline unsigned int prio_add_entry(
 static inline unsigned int prio_subtract_entry(
     struct priority_distribution *pd, int priority)
 {
-    unsigned int count;
-
-#ifdef CPU_ARM
-    count = pd->hist[priority];
-    if (--count == 0)
-        pd->mask &= ~(1 << priority);
-    pd->hist[priority] = count;
-#else
-    if ((count = --pd->hist[priority]) == 0)
-        pd->mask &= ~(1 << priority);
-#endif
-
+    unsigned int count = --pd->hist[priority];
+    if (count == 0)
+        priobit_clear_bit(&pd->mask, priority);
     return count;
 }
 
@@ -521,31 +595,38 @@ static inline unsigned int prio_subtract_entry(
 static inline void prio_move_entry(
     struct priority_distribution *pd, int from, int to)
 {
-    uint32_t mask = pd->mask;
-
-#ifdef CPU_ARM
-    unsigned int count;
-
-    count = pd->hist[from];
-    if (--count == 0)
-        mask &= ~(1 << from);
-    pd->hist[from] = count;
-
-    count = pd->hist[to];
-    if (++count == 1)
-        mask |= 1 << to;
-    pd->hist[to] = count;
-#else
-    if (--pd->hist[from] == 0)
-        mask &= ~(1 << from);
-
-    if (++pd->hist[to] == 1)
-        mask |= 1 << to;
-#endif
-
-    pd->mask = mask;
+    if (--pd->hist[from] == 0)
+        priobit_clear_bit(&pd->mask, from);
+
+    if (++pd->hist[to] == 1)
+        priobit_set_bit(&pd->mask, to);
 }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+/*---------------------------------------------------------------------------
+ * Move a thread back to a running state on its core.
+ *---------------------------------------------------------------------------
+ */
+static void core_schedule_wakeup(struct thread_entry *thread)
+{
+    const unsigned int core = IF_COP_CORE(thread->core);
+
+    RTR_LOCK(core);
+
+    thread->state = STATE_RUNNING;
+
+    add_to_list_l(&cores[core].running, thread);
+    rtr_add_entry(core, thread->priority);
+
+    RTR_UNLOCK(core);
+
+#if NUM_CORES > 1
+    if (core != CURRENT_CORE)
+        core_wake(core);
+#endif
+}
 
+#ifdef HAVE_PRIORITY_SCHEDULING
 /*---------------------------------------------------------------------------
  * Change the priority and rtr entry for a running thread
  *---------------------------------------------------------------------------
@@ -605,191 +686,211 @@ static int find_highest_priority_in_list_l(
  *  those are prevented, right? :-)
  *---------------------------------------------------------------------------
  */
-static struct thread_entry *
-    blocker_inherit_priority(struct thread_entry *current)
-{
-    const int priority = current->priority;
-    struct blocker *bl = current->blocker;
-    struct thread_entry * const tstart = current;
-    struct thread_entry *bl_t = bl->thread;
-
-    /* Blocker cannot change since the object protection is held */
-    LOCK_THREAD(bl_t);
-
-    for (;;)
-    {
-        struct thread_entry *next;
-        int bl_pr = bl->priority;
-
-        if (priority >= bl_pr)
-            break; /* Object priority already high enough */
-
-        bl->priority = priority;
-
-        /* Add this one */
-        prio_add_entry(&bl_t->pdist, priority);
-
-        if (bl_pr < PRIORITY_IDLE)
-        {
-            /* Not first waiter - subtract old one */
-            prio_subtract_entry(&bl_t->pdist, bl_pr);
-        }
-
-        if (priority >= bl_t->priority)
-            break; /* Thread priority high enough */
-
-        if (bl_t->state == STATE_RUNNING)
-        {
-            /* Blocking thread is a running thread therefore there are no
-             * further blockers. Change the "run queue" on which it
-             * resides. */
-            set_running_thread_priority(bl_t, priority);
-            break;
-        }
-
-        bl_t->priority = priority;
-
-        /* If blocking thread has a blocker, apply transitive inheritance */
-        bl = bl_t->blocker;
-
-        if (bl == NULL)
-            break; /* End of chain or object doesn't support inheritance */
-
-        next = bl->thread;
-
-        if (UNLIKELY(next == tstart))
-            break; /* Full-circle - deadlock! */
-
-        UNLOCK_THREAD(current);
-
-#if NUM_CORES > 1
-        for (;;)
-        {
-            LOCK_THREAD(next);
-
-            /* Blocker could change - retest condition */
-            if (LIKELY(bl->thread == next))
-                break;
-
-            UNLOCK_THREAD(next);
-            next = bl->thread;
-        }
-#endif
-        current = bl_t;
-        bl_t = next;
-    }
-
-    UNLOCK_THREAD(bl_t);
-
-    return current;
-}
-
-/*---------------------------------------------------------------------------
- * Readjust priorities when waking a thread blocked waiting for another
- * in essence "releasing" the thread's effect on the object owner. Can be
- * performed from any context.
- *---------------------------------------------------------------------------
- */
-struct thread_entry *
-    wakeup_priority_protocol_release(struct thread_entry *thread)
-{
-    const int priority = thread->priority;
-    struct blocker *bl = thread->blocker;
-    struct thread_entry * const tstart = thread;
-    struct thread_entry *bl_t = bl->thread;
-
-    /* Blocker cannot change since object will be locked */
-    LOCK_THREAD(bl_t);
-
-    thread->blocker = NULL; /* Thread not blocked */
-
-    for (;;)
-    {
-        struct thread_entry *next;
-        int bl_pr = bl->priority;
-
-        if (priority > bl_pr)
-            break; /* Object priority higher */
-
-        next = *thread->bqp;
-
-        if (next == NULL)
-        {
-            /* No more threads in queue */
-            prio_subtract_entry(&bl_t->pdist, bl_pr);
-            bl->priority = PRIORITY_IDLE;
-        }
-        else
-        {
-            /* Check list for highest remaining priority */
-            int queue_pr = find_highest_priority_in_list_l(next);
-
-            if (queue_pr == bl_pr)
-                break; /* Object priority not changing */
-
-            /* Change queue priority */
-            prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
-            bl->priority = queue_pr;
-        }
-
-        if (bl_pr > bl_t->priority)
-            break; /* thread priority is higher */
-
-        bl_pr = find_first_set_bit(bl_t->pdist.mask);
-
-        if (bl_pr == bl_t->priority)
-            break; /* Thread priority not changing */
-
-        if (bl_t->state == STATE_RUNNING)
-        {
-            /* No further blockers */
-            set_running_thread_priority(bl_t, bl_pr);
-            break;
-        }
-
-        bl_t->priority = bl_pr;
-
-        /* If blocking thread has a blocker, apply transitive inheritance */
-        bl = bl_t->blocker;
-
-        if (bl == NULL)
-            break; /* End of chain or object doesn't support inheritance */
-
-        next = bl->thread;
-
-        if (UNLIKELY(next == tstart))
-            break; /* Full-circle - deadlock! */
-
-        UNLOCK_THREAD(thread);
-
-#if NUM_CORES > 1
-        for (;;)
-        {
-            LOCK_THREAD(next);
-
-            /* Blocker could change - retest condition */
-            if (LIKELY(bl->thread == next))
-                break;
-
-            UNLOCK_THREAD(next);
-            next = bl->thread;
-        }
-#endif
-        thread = bl_t;
-        bl_t = next;
-    }
-
-    UNLOCK_THREAD(bl_t);
-
-#if NUM_CORES > 1
-    if (UNLIKELY(thread != tstart))
-    {
-        /* Relock original if it changed */
-        LOCK_THREAD(tstart);
-    }
-#endif
-
-    return cores[CURRENT_CORE].running;
-}
+static void inherit_priority(
+    struct blocker * const blocker0, struct blocker *bl,
+    struct thread_entry *blt, int newblpr)
+{
+    int oldblpr = bl->priority;
+
+    while (1)
+    {
+        if (blt == NULL)
+        {
+            /* Multiple owners */
+            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+
+            /* Recurse down the all the branches of this; it's the only way.
+               We might meet the same queue several times if more than one of
+               these threads is waiting the same queue. That isn't a problem
+               for us since we early-terminate, just notable. */
+            FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
+            {
+                bl->priority = oldblpr; /* To see the change each time */
+                blt = &threads[slotnum];
+                LOCK_THREAD(blt);
+                inherit_priority(blocker0, bl, blt, newblpr);
+            }
+
+            corelock_unlock(&blsplay->cl);
+            return;
+        }
+
+        bl->priority = newblpr;
+
+        /* Update blocker thread inheritance record */
+        if (newblpr < PRIORITY_IDLE)
+            prio_add_entry(&blt->pdist, newblpr);
+
+        if (oldblpr < PRIORITY_IDLE)
+            prio_subtract_entry(&blt->pdist, oldblpr);
+
+        int oldpr = blt->priority;
+        int newpr = priobit_ffs(&blt->pdist.mask);
+        if (newpr == oldpr)
+            break; /* No blocker thread priority change */
+
+        if (blt->state == STATE_RUNNING)
+        {
+            set_running_thread_priority(blt, newpr);
+            break; /* Running: last in chain */
+        }
+
+        /* Blocker is blocked */
+        blt->priority = newpr;
+
+        bl = blt->blocker;
+        if (LIKELY(bl == NULL))
+            break; /* Block doesn't support PIP */
+
+        if (UNLIKELY(bl == blocker0))
+            break; /* Full circle - deadlock! */
+
+        /* Blocker becomes current thread and the process repeats */
+        struct thread_entry **bqp = blt->bqp;
+        struct thread_entry *t = blt;
+        blt = lock_blocker_thread(bl);
+
+        UNLOCK_THREAD(t);
+
+        /* Adjust this wait queue */
+        oldblpr = bl->priority;
+        if (newpr <= oldblpr)
+            newblpr = newpr;
+        else if (oldpr <= oldblpr)
+            newblpr = find_highest_priority_in_list_l(*bqp);
+
+        if (newblpr == oldblpr)
+            break; /* Queue priority not changing */
+    }
+
+    UNLOCK_THREAD(blt);
+}
+
+/*---------------------------------------------------------------------------
+ * Quick-disinherit of priority elevation. 'thread' must be a running thread.
+ *---------------------------------------------------------------------------
+ */
+static void priority_disinherit_internal(struct thread_entry *thread,
+                                         int blpr)
+{
+    if (blpr < PRIORITY_IDLE &&
+        prio_subtract_entry(&thread->pdist, blpr) == 0 &&
+        blpr <= thread->priority)
+    {
+        int priority = priobit_ffs(&thread->pdist.mask);
+        if (priority != thread->priority)
+            set_running_thread_priority(thread, priority);
+    }
+}
+
+void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
+{
+    LOCK_THREAD(thread);
+    priority_disinherit_internal(thread, bl->priority);
+    UNLOCK_THREAD(thread);
+}
+
+/*---------------------------------------------------------------------------
+ * Transfer ownership from a single owner to a multi-owner splay from a wait
+ * queue
+ *---------------------------------------------------------------------------
+ */
+static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
+{
+    /* All threads will have the same blocker and queue; only we are changing
+       it now */
+    struct thread_entry **bqp = thread->bqp;
+    struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker;
+    struct thread_entry *blt = blsplay->blocker.thread;
+
+    /* The first thread is already locked and is assumed tagged "multi" */
+    int count = 1;
+    struct thread_entry *temp_queue = NULL;
+
+    /* 'thread' is locked on entry */
+    while (1)
+    {
+        LOCK_THREAD(blt);
+
+        remove_from_list_l(bqp, thread);
+        thread->blocker = NULL;
+
+        struct thread_entry *tnext = *bqp;
+        if (tnext == NULL || tnext->retval == 0)
+            break;
+
+        add_to_list_l(&temp_queue, thread);
+
+        UNLOCK_THREAD(thread);
+        UNLOCK_THREAD(blt);
+
+        count++;
+        thread = tnext;
+
+        LOCK_THREAD(thread);
+    }
+
+    int blpr = blsplay->blocker.priority;
+    priority_disinherit_internal(blt, blpr);
+
+    /* Locking order reverses here since the threads are no longer on the
+       queue side */
+    if (count > 1)
+    {
+        add_to_list_l(&temp_queue, thread);
+        UNLOCK_THREAD(thread);
+        corelock_lock(&blsplay->cl);
+
+        blpr = find_highest_priority_in_list_l(*bqp);
+        blsplay->blocker.thread = NULL;
+
+        thread = temp_queue;
+        LOCK_THREAD(thread);
+    }
+    else
+    {
+        /* Becomes a simple, direct transfer */
+        if (thread->priority <= blpr)
+            blpr = find_highest_priority_in_list_l(*bqp);
+        blsplay->blocker.thread = thread;
+    }
+
+    blsplay->blocker.priority = blpr;
+
+    while (1)
+    {
+        unsigned int slotnum = THREAD_ID_SLOT(thread->id);
+        threadbit_set_bit(&blsplay->mask, slotnum);
+
+        if (blpr < PRIORITY_IDLE)
+        {
+            prio_add_entry(&thread->pdist, blpr);
+            if (blpr < thread->priority)
+                thread->priority = blpr;
+        }
+
+        if (count > 1)
+            remove_from_list_l(&temp_queue, thread);
+
+        core_schedule_wakeup(thread);
+
+        UNLOCK_THREAD(thread);
+
+        thread = temp_queue;
+        if (thread == NULL)
+            break;
+
+        LOCK_THREAD(thread);
+    }
+
+    UNLOCK_THREAD(blt);
+
+    if (count > 1)
+    {
+        corelock_unlock(&blsplay->cl);
+    }
+
+    blt->retval = count;
+}
 
 /*---------------------------------------------------------------------------
@@ -801,67 +902,95 @@ struct thread_entry *
801 * it is the running thread is made. 902 * it is the running thread is made.
802 *--------------------------------------------------------------------------- 903 *---------------------------------------------------------------------------
803 */ 904 */
804struct thread_entry * 905static void wakeup_thread_transfer(struct thread_entry *thread)
805 wakeup_priority_protocol_transfer(struct thread_entry *thread)
806{ 906{
807 /* Waking thread inherits priority boost from object owner */ 907 /* Waking thread inherits priority boost from object owner (blt) */
808 struct blocker *bl = thread->blocker; 908 struct blocker *bl = thread->blocker;
809 struct thread_entry *bl_t = bl->thread; 909 struct thread_entry *blt = bl->thread;
810 struct thread_entry *next;
811 int bl_pr;
812 910
813 THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t, 911 THREAD_ASSERT(cores[CURRENT_CORE].running == blt,
814 "UPPT->wrong thread", cores[CURRENT_CORE].running); 912 "UPPT->wrong thread", cores[CURRENT_CORE].running);
815 913
816 LOCK_THREAD(bl_t); 914 LOCK_THREAD(blt);
915
916 struct thread_entry **bqp = thread->bqp;
917 remove_from_list_l(bqp, thread);
918 thread->blocker = NULL;
817 919
818 bl_pr = bl->priority; 920 int blpr = bl->priority;
819 921
820 /* Remove the object's boost from the owning thread */ 922 /* Remove the object's boost from the owning thread */
821 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 && 923 if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority)
822 bl_pr <= bl_t->priority)
823 { 924 {
824 /* No more threads at this priority are waiting and the old level is 925 /* No more threads at this priority are waiting and the old level is
825 * at least the thread level */ 926 * at least the thread level */
826 int priority = find_first_set_bit(bl_t->pdist.mask); 927 int priority = priobit_ffs(&blt->pdist.mask);
827 928 if (priority != blt->priority)
828 if (priority != bl_t->priority) 929 set_running_thread_priority(blt, priority);
829 {
830 /* Adjust this thread's priority */
831 set_running_thread_priority(bl_t, priority);
832 }
833 } 930 }
834 931
835 next = *thread->bqp; 932 struct thread_entry *tnext = *bqp;
836 933
837 if (LIKELY(next == NULL)) 934 if (LIKELY(tnext == NULL))
838 { 935 {
839 /* Expected shortcut - no more waiters */ 936 /* Expected shortcut - no more waiters */
840 bl_pr = PRIORITY_IDLE; 937 blpr = PRIORITY_IDLE;
841 } 938 }
842 else 939 else
843 { 940 {
844 if (thread->priority <= bl_pr) 941 /* If lowering, we need to scan threads remaining in queue */
845 { 942 int priority = thread->priority;
846 /* Need to scan threads remaining in queue */ 943 if (priority <= blpr)
847 bl_pr = find_highest_priority_in_list_l(next); 944 blpr = find_highest_priority_in_list_l(tnext);
848 }
849 945
850 if (prio_add_entry(&thread->pdist, bl_pr) == 1 && 946 if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority)
851 bl_pr < thread->priority) 947 thread->priority = blpr; /* Raise new owner */
852 {
853 /* Thread priority must be raised */
854 thread->priority = bl_pr;
855 }
856 } 948 }
857 949
858 bl->thread = thread; /* This thread pwns */ 950 core_schedule_wakeup(thread);
859 bl->priority = bl_pr; /* Save highest blocked priority */ 951 UNLOCK_THREAD(thread);
860 thread->blocker = NULL; /* Thread not blocked */ 952
953 bl->thread = thread; /* This thread pwns */
954 bl->priority = blpr; /* Save highest blocked priority */
955 UNLOCK_THREAD(blt);
956}
957
958/*---------------------------------------------------------------------------
959 * Readjust priorities when waking a thread blocked waiting for another
960 * in essence "releasing" the thread's effect on the object owner. Can be
961 * performed from any context.
962 *---------------------------------------------------------------------------
963 */
964static void wakeup_thread_release(struct thread_entry *thread)
965{
966 struct blocker *bl = thread->blocker;
967 struct thread_entry *blt = lock_blocker_thread(bl);
968 struct thread_entry **bqp = thread->bqp;
969 remove_from_list_l(bqp, thread);
970 thread->blocker = NULL;
971
972 /* Off to see the wizard... */
973 core_schedule_wakeup(thread);
974
975 if (thread->priority > bl->priority)
976 {
977 /* Queue priority won't change */
978 UNLOCK_THREAD(thread);
979 unlock_blocker_thread(bl);
980 return;
981 }
982
983 UNLOCK_THREAD(thread);
861 984
862 UNLOCK_THREAD(bl_t); 985 int newblpr = find_highest_priority_in_list_l(*bqp);
986 if (newblpr == bl->priority)
987 {
988 /* Blocker priority won't change */
989 unlock_blocker_thread(bl);
990 return;
991 }
863 992
864 return bl_t; 993 inherit_priority(bl, bl, blt, newblpr);
865} 994}
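
A worked example of the two early-outs above (lower number means higher priority, as everywhere in this scheduler):

    /* Owner O (base priority 20) holds an object with bl->priority == 9,
     * boosted by waiters A (priority 9) and B (priority 12).
     *
     * Waking B: 12 > bl->priority, so the highest blocked priority
     *   cannot have changed; only the dequeue and wakeup happen.
     * Waking A: 9 <= 9 forces a rescan, which finds only B, so
     *   newblpr == 12 != 9 and inherit_priority() lowers O's boost
     *   from 9 to 12. */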
866 995
867/*--------------------------------------------------------------------------- 996/*---------------------------------------------------------------------------
@@ -877,9 +1006,8 @@ static void __attribute__((noinline)) check_for_obj_waiters(
877{ 1006{
878 /* Only one bit in the mask should be set with a frequency of 1, which 1007
879 * represents the thread's own base priority */ 1008 * represents the thread's own base priority */
880 uint32_t mask = thread->pdist.mask; 1009 if (priobit_popcount(&thread->pdist.mask) != 1 ||
881 if ((mask & (mask - 1)) != 0 || 1010 thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1)
882 thread->pdist.hist[find_first_set_bit(mask)] > 1)
883 { 1011 {
884 unsigned char name[32]; 1012 unsigned char name[32];
885 thread_get_name(name, 32, thread); 1013 thread_get_name(name, 32, thread);
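
Concretely, the invariant being checked, in terms of the pdist fields used throughout this file:

    /* A thread at base priority 15 that holds no blocking objects has
     * exactly pdist.hist[15] == 1 and only bit 15 set in pdist.mask.
     * An extra bit, or a count above 1, means some kernel object is
     * still contributing a boost as the thread goes down. */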
@@ -889,26 +1017,72 @@ static void __attribute__((noinline)) check_for_obj_waiters(
889#endif /* HAVE_PRIORITY_SCHEDULING */ 1017#endif /* HAVE_PRIORITY_SCHEDULING */
890 1018
891/*--------------------------------------------------------------------------- 1019/*---------------------------------------------------------------------------
892 * Move a thread back to a running state on its core. 1020 * Explicitly wake up a thread on a blocking queue. Only affects threads of
1021 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1022 *
1023 * This code should be considered a critical section by the caller, meaning
1024 * that the object's corelock should be held.
1025 *
1026 * INTERNAL: Intended for use by kernel objects and not for programs.
893 *--------------------------------------------------------------------------- 1027 *---------------------------------------------------------------------------
894 */ 1028 */
895static void core_schedule_wakeup(struct thread_entry *thread) 1029unsigned int wakeup_thread_(struct thread_entry **list
1030 IF_PRIO(, enum wakeup_thread_protocol proto))
896{ 1031{
897 const unsigned int core = IF_COP_CORE(thread->core); 1032 struct thread_entry *thread = *list;
898 1033
899 RTR_LOCK(core); 1034 /* Check if there is a blocked thread at all. */
1035 if (*list == NULL)
1036 return THREAD_NONE;
900 1037
901 thread->state = STATE_RUNNING; 1038 LOCK_THREAD(thread);
902 1039
903 add_to_list_l(&cores[core].running, thread); 1040 /* Determine thread's current state. */
904 rtr_add_entry(core, thread->priority); 1041 switch (thread->state)
1042 {
1043 case STATE_BLOCKED:
1044 case STATE_BLOCKED_W_TMO:
1045#ifdef HAVE_PRIORITY_SCHEDULING
1046 /* Threads with PIP blockers cannot specify "WAKEUP_DEFAULT" */
1047 if (thread->blocker != NULL)
1048 {
1049 static void (* const funcs[])(struct thread_entry *thread)
1050 ICONST_ATTR =
1051 {
1052 [WAKEUP_DEFAULT] = NULL,
1053 [WAKEUP_TRANSFER] = wakeup_thread_transfer,
1054 [WAKEUP_RELEASE] = wakeup_thread_release,
1055 [WAKEUP_TRANSFER_MULTI] = wakeup_thread_queue_multi_transfer,
1056 };
1057
1058 /* Call the specified unblocking PIP (does the rest) */
1059 funcs[proto](thread);
1060 }
1061 else
1062#endif /* HAVE_PRIORITY_SCHEDULING */
1063 {
1064 /* No PIP - just boost the thread by aging */
1065#ifdef HAVE_PRIORITY_SCHEDULING
1066 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1067 thread->skip_count = thread->priority;
1068#endif /* HAVE_PRIORITY_SCHEDULING */
1069 remove_from_list_l(list, thread);
1070 core_schedule_wakeup(thread);
1071 UNLOCK_THREAD(thread);
1072 }
905 1073
906 RTR_UNLOCK(core); 1074 return should_switch_tasks();
907 1075
908#if NUM_CORES > 1 1076 /* Nothing to do. State is not blocked. */
909 if (core != CURRENT_CORE) 1077 default:
910 core_wake(core); 1078#if THREAD_EXTRA_CHECKS
1079 THREAD_PANICF("wakeup_thread->block invalid", thread);
1080 case STATE_RUNNING:
1081 case STATE_KILLED:
911#endif 1082#endif
1083 UNLOCK_THREAD(thread);
1084 return THREAD_NONE;
1085 }
912} 1086}
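
A minimal sketch of the caller contract stated in the comment above: hold the object's corelock, pass the waiter list plus a protocol, and honor the returned hint. The obj variable and its fields are hypothetical.

    corelock_lock(&obj->cl);
    unsigned int rc = wakeup_thread(&obj->queue, WAKEUP_RELEASE);
    corelock_unlock(&obj->cl);

    if (rc & THREAD_SWITCH)
        switch_thread(); /* a higher-priority thread became runnable */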
913 1087
914/*--------------------------------------------------------------------------- 1088/*---------------------------------------------------------------------------
@@ -990,8 +1164,6 @@ void check_tmo_threads(void)
990 } 1164 }
991#endif /* NUM_CORES */ 1165#endif /* NUM_CORES */
992 1166
993 remove_from_list_l(curr->bqp, curr);
994
995#ifdef HAVE_WAKEUP_EXT_CB 1167#ifdef HAVE_WAKEUP_EXT_CB
996 if (curr->wakeup_ext_cb != NULL) 1168 if (curr->wakeup_ext_cb != NULL)
997 curr->wakeup_ext_cb(curr); 1169 curr->wakeup_ext_cb(curr);
@@ -999,8 +1171,11 @@ void check_tmo_threads(void)
999 1171
1000#ifdef HAVE_PRIORITY_SCHEDULING 1172#ifdef HAVE_PRIORITY_SCHEDULING
1001 if (curr->blocker != NULL) 1173 if (curr->blocker != NULL)
1002 wakeup_priority_protocol_release(curr); 1174 wakeup_thread_release(curr);
1175 else
1003#endif 1176#endif
1177 remove_from_list_l(curr->bqp, curr);
1178
1004 corelock_unlock(ocl); 1179 corelock_unlock(ocl);
1005 } 1180 }
1006 /* else state == STATE_SLEEPING */ 1181 /* else state == STATE_SLEEPING */
@@ -1161,8 +1336,7 @@ void switch_thread(void)
1161 /* Begin task switching by saving our current context so that we can 1336 /* Begin task switching by saving our current context so that we can
1162 * restore the state of the current thread later to the point prior 1337 * restore the state of the current thread later to the point prior
1163 * to this call. */ 1338 * to this call. */
1164 store_context(&thread->context); 1339 thread_store_context(thread);
1165
1166#ifdef DEBUG 1340#ifdef DEBUG
1167 /* Check core_ctx buflib integrity */ 1341 /* Check core_ctx buflib integrity */
1168 core_check_valid(); 1342 core_check_valid();
@@ -1212,8 +1386,7 @@ void switch_thread(void)
1212 /* Select the new task based on priorities and the last time a 1386 /* Select the new task based on priorities and the last time a
1213 * process got CPU time relative to the highest priority runnable 1387 * process got CPU time relative to the highest priority runnable
1214 * task. */ 1388 * task. */
1215 struct priority_distribution *pd = &cores[core].rtr; 1389 int max = priobit_ffs(&cores[core].rtr.mask);
1216 int max = find_first_set_bit(pd->mask);
1217 1390
1218 if (block == NULL) 1391 if (block == NULL)
1219 { 1392 {
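
priobit_ffs() is the typed-bitarray replacement for find_first_set_bit(); the semantics the scheduler relies on, sketched here rather than quoted from bitarray.h:

    /* Lowest set bit index == numerically smallest value == highest
     * priority. E.g. with runnable threads at priorities 3 and 17,
     * rtr.mask has bits 3 and 17 set and priobit_ffs() returns 3. */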
@@ -1269,7 +1442,7 @@ void switch_thread(void)
1269 } 1442 }
1270 1443
1271 /* And finally give control to the next thread. */ 1444 /* And finally give control to the next thread. */
1272 load_context(&thread->context); 1445 thread_load_context(thread);
1273 1446
1274#ifdef RB_PROFILE 1447#ifdef RB_PROFILE
1275 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); 1448 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
@@ -1291,140 +1464,59 @@ void sleep_thread(int ticks)
1291 LOCK_THREAD(current); 1464 LOCK_THREAD(current);
1292 1465
1293 /* Set our timeout, remove from run list and join timeout list. */ 1466 /* Set our timeout, remove from run list and join timeout list. */
1294 current->tmo_tick = current_tick + ticks + 1; 1467 current->tmo_tick = current_tick + MAX(ticks, 0) + 1;
1295 block_thread_on_l(current, STATE_SLEEPING); 1468 block_thread_on_l(current, STATE_SLEEPING);
1296 1469
1297 UNLOCK_THREAD(current); 1470 UNLOCK_THREAD(current);
1298} 1471}
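
The MAX(ticks, 0) clamp makes a negative argument behave as a zero-tick sleep, and the +1 preserves the at-least-this-long guarantee since the current tick is already partially elapsed. For example:

    /* sleep_thread(2)  -> tmo_tick = current_tick + 3: at least two
     *                     whole ticks pass before wakeup
     * sleep_thread(0)  -> wakes on the next tick boundary
     * sleep_thread(-5) -> clamped, same as sleep_thread(0) */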
1299 1472
1300/*--------------------------------------------------------------------------- 1473/*---------------------------------------------------------------------------
1301 * Indefinitely block a thread on a blocking queue for explicit wakeup. 1474 * Block a thread on a blocking queue for explicit wakeup. If timeout is
1475 * negative, the block is infinite.
1302 * 1476 *
1303 * INTERNAL: Intended for use by kernel objects and not for programs. 1477 * INTERNAL: Intended for use by kernel objects and not for programs.
1304 *--------------------------------------------------------------------------- 1478 *---------------------------------------------------------------------------
1305 */ 1479 */
1306void block_thread(struct thread_entry *current) 1480void block_thread(struct thread_entry *current, int timeout)
1307{ 1481{
1308 /* Set the state to blocked and take us off of the run queue until we
1309 * are explicitly woken */
1310 LOCK_THREAD(current); 1482 LOCK_THREAD(current);
1311 1483
1312 /* Set the list for explicit wakeup */ 1484 struct blocker *bl = NULL;
1313 block_thread_on_l(current, STATE_BLOCKED);
1314
1315#ifdef HAVE_PRIORITY_SCHEDULING 1485#ifdef HAVE_PRIORITY_SCHEDULING
1316 if (current->blocker != NULL) 1486 bl = current->blocker;
1487 struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL;
1488#endif /* HAVE_PRIORITY_SCHEDULING */
1489
1490 if (LIKELY(timeout < 0))
1317 { 1491 {
1318 /* Object supports PIP */ 1492 /* Block until explicitly woken */
1319 current = blocker_inherit_priority(current); 1493 block_thread_on_l(current, STATE_BLOCKED);
1320 } 1494 }
1321#endif 1495 else
1322
1323 UNLOCK_THREAD(current);
1324}
1325
1326/*---------------------------------------------------------------------------
1327 * Block a thread on a blocking queue for a specified time interval or until
1328 * explicitly woken - whichever happens first.
1329 *
1330 * INTERNAL: Intended for use by kernel objects and not for programs.
1331 *---------------------------------------------------------------------------
1332 */
1333void block_thread_w_tmo(struct thread_entry *current, int timeout)
1334{
1335 /* Get the entry for the current running thread. */
1336 LOCK_THREAD(current);
1337
1338 /* Set the state to blocked with the specified timeout */
1339 current->tmo_tick = current_tick + timeout;
1340
1341 /* Set the list for explicit wakeup */
1342 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1343
1344#ifdef HAVE_PRIORITY_SCHEDULING
1345 if (current->blocker != NULL)
1346 { 1496 {
1347 /* Object supports PIP */ 1497 /* Set the state to blocked with the specified timeout */
1348 current = blocker_inherit_priority(current); 1498 current->tmo_tick = current_tick + timeout;
1499 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1349 } 1500 }
1350#endif
1351 1501
1352 UNLOCK_THREAD(current); 1502 if (bl == NULL)
1353}
1354
1355/*---------------------------------------------------------------------------
1356 * Explicitly wake up a thread on a blocking queue. Only affects threads of
1357 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1358 *
1359 * This code should be considered a critical section by the caller, meaning
1360 * that the object's corelock should be held.
1361 *
1362 * INTERNAL: Intended for use by kernel objects and not for programs.
1363 *---------------------------------------------------------------------------
1364 */
1365unsigned int wakeup_thread(struct thread_entry **list)
1366{
1367 struct thread_entry *thread = *list;
1368 unsigned int result = THREAD_NONE;
1369
1370 /* Check if there is a blocked thread at all. */
1371 if (thread == NULL)
1372 return result;
1373
1374 LOCK_THREAD(thread);
1375
1376 /* Determine thread's current state. */
1377 switch (thread->state)
1378 { 1503 {
1379 case STATE_BLOCKED: 1504 UNLOCK_THREAD(current);
1380 case STATE_BLOCKED_W_TMO: 1505 return;
1381 remove_from_list_l(list, thread); 1506 }
1382
1383 result = THREAD_OK;
1384 1507
1385#ifdef HAVE_PRIORITY_SCHEDULING 1508#ifdef HAVE_PRIORITY_SCHEDULING
1386 struct thread_entry *current; 1509 int newblpr = current->priority;
1387 struct blocker *bl = thread->blocker; 1510 UNLOCK_THREAD(current);
1388
1389 if (bl == NULL)
1390 {
1391 /* No inheritance - just boost the thread by aging */
1392 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1393 thread->skip_count = thread->priority;
1394 current = cores[CURRENT_CORE].running;
1395 }
1396 else
1397 {
1398 /* Call the specified unblocking PIP */
1399 current = bl->wakeup_protocol(thread);
1400 }
1401
1402 if (current != NULL &&
1403 find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
1404 < current->priority)
1405 {
1406 /* There is a thread ready to run of higher or same priority on
1407 * the same core as the current one; recommend a task switch.
1408 * Knowing if this is an interrupt call would be helpful here. */
1409 result |= THREAD_SWITCH;
1410 }
1411#endif /* HAVE_PRIORITY_SCHEDULING */
1412
1413 core_schedule_wakeup(thread);
1414 break;
1415 1511
1416 /* Nothing to do. State is not blocked. */ 1512 if (newblpr >= bl->priority)
1417#if THREAD_EXTRA_CHECKS 1513 {
1418 default: 1514 unlock_blocker_thread(bl);
1419 THREAD_PANICF("wakeup_thread->block invalid", thread); 1515 return; /* Queue priority won't change */
1420 case STATE_RUNNING:
1421 case STATE_KILLED:
1422 break;
1423#endif
1424 } 1516 }
1425 1517
1426 UNLOCK_THREAD(thread); 1518 inherit_priority(bl, bl, blt, newblpr);
1427 return result; 1519#endif /* HAVE_PRIORITY_SCHEDULING */
1428} 1520}
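
Both old entry points collapse into this one call. A usage sketch, with qp standing in for some object's waiter list head and corelock handling elided, mirroring the thread_wait() call sites updated below:

    current->bqp = qp;                    /* queue to wait on */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK); /* negative: wait until woken */
    /* or: block_thread(current, HZ/2);      give up after half a second */
    switch_thread();                      /* actually yield the CPU */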
1429 1521
1430/*--------------------------------------------------------------------------- 1522/*---------------------------------------------------------------------------
@@ -1435,25 +1527,31 @@ unsigned int wakeup_thread(struct thread_entry **list)
1435 * INTERNAL: Intended for use by kernel objects and not for programs. 1527 * INTERNAL: Intended for use by kernel objects and not for programs.
1436 *--------------------------------------------------------------------------- 1528 *---------------------------------------------------------------------------
1437 */ 1529 */
1438unsigned int thread_queue_wake(struct thread_entry **list) 1530unsigned int thread_queue_wake(struct thread_entry **list,
1531 volatile int *count)
1439{ 1532{
1533 int num = 0;
1440 unsigned result = THREAD_NONE; 1534 unsigned result = THREAD_NONE;
1441 1535
1442 for (;;) 1536 for (;;)
1443 { 1537 {
1444 unsigned int rc = wakeup_thread(list); 1538 unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
1445 1539
1446 if (rc == THREAD_NONE) 1540 if (rc == THREAD_NONE)
1447 break; /* No more threads */ 1541 break; /* No more threads */
1448 1542
1449 result |= rc; 1543 result |= rc;
1544 num++;
1450 } 1545 }
1451 1546
1547 if (count)
1548 *count = num;
1549
1452 return result; 1550 return result;
1453} 1551}
1454 1552
1455/*--------------------------------------------------------------------------- 1553/*---------------------------------------------------------------------------
1456 * Assign the thread slot a new ID. Version is 1-255. 1554 * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
1457 *--------------------------------------------------------------------------- 1555 *---------------------------------------------------------------------------
1458 */ 1556 */
1459static void new_thread_id(unsigned int slot_num, 1557static void new_thread_id(unsigned int slot_num,
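
The new version range suggests the ID keeps the slot number in the low byte with the version stored above it; a hedged reconstruction from the THREAD_ID_SLOT_MASK use in switch_thread() above (field widths assumed, this hunk does not show them):

    /* Assumed layout: id == version | slot_num, slot in bits 0..7. */
    unsigned int slot    = id & THREAD_ID_SLOT_MASK;   /* assumed 0x000000ff */
    unsigned int version = id & ~THREAD_ID_SLOT_MASK;  /* 0x00000100..0xffffff00 */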
@@ -1693,7 +1791,7 @@ void thread_wait(unsigned int thread_id)
1693 current->bqp = &thread->queue; 1791 current->bqp = &thread->queue;
1694 1792
1695 disable_irq(); 1793 disable_irq();
1696 block_thread(current); 1794 block_thread(current, TIMEOUT_BLOCK);
1697 1795
1698 corelock_unlock(&thread->waiter_cl); 1796 corelock_unlock(&thread->waiter_cl);
1699 1797
@@ -1723,7 +1821,7 @@ static inline void thread_final_exit(struct thread_entry *current)
1723 * execution except the slot itself. */ 1821 * execution except the slot itself. */
1724 1822
1725 /* Signal this thread */ 1823 /* Signal this thread */
1726 thread_queue_wake(&current->queue); 1824 thread_queue_wake(&current->queue, NULL);
1727 corelock_unlock(&current->waiter_cl); 1825 corelock_unlock(&current->waiter_cl);
1728 switch_thread(); 1826 switch_thread();
1729 /* This should never and must never be reached - if it is, the 1827 /* This should never and must never be reached - if it is, the
@@ -1912,20 +2010,18 @@ IF_COP( retry_state: )
1912 } 2010 }
1913 } 2011 }
1914#endif 2012#endif
1915 remove_from_list_l(thread->bqp, thread);
1916
1917#ifdef HAVE_WAKEUP_EXT_CB 2013#ifdef HAVE_WAKEUP_EXT_CB
1918 if (thread->wakeup_ext_cb != NULL) 2014 if (thread->wakeup_ext_cb != NULL)
1919 thread->wakeup_ext_cb(thread); 2015 thread->wakeup_ext_cb(thread);
1920#endif 2016#endif
1921 2017
1922#ifdef HAVE_PRIORITY_SCHEDULING 2018#ifdef HAVE_PRIORITY_SCHEDULING
2019 /* Remove thread's priority influence from its chain if needed */
1923 if (thread->blocker != NULL) 2020 if (thread->blocker != NULL)
1924 {
1925 /* Remove thread's priority influence from its chain */
1926 wakeup_priority_protocol_release(thread); 2021 wakeup_priority_protocol_release(thread);
1927 } 2022 else
1928#endif 2023#endif
2024 remove_from_list_l(thread->bqp, thread);
1929 2025
1930#if NUM_CORES > 1 2026#if NUM_CORES > 1
1931 if (ocl != NULL) 2027 if (ocl != NULL)
@@ -1970,130 +2066,77 @@ thread_killed: /* Thread was already killed */
1970 */ 2066 */
1971int thread_set_priority(unsigned int thread_id, int priority) 2067int thread_set_priority(unsigned int thread_id, int priority)
1972{ 2068{
2069 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
2070 return -1; /* Invalid priority argument */
2071
1973 int old_base_priority = -1; 2072 int old_base_priority = -1;
1974 struct thread_entry *thread = thread_id_entry(thread_id); 2073 struct thread_entry *thread = thread_id_entry(thread_id);
1975 2074
1976 /* A little safety measure */
1977 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
1978 return -1;
1979
1980 /* Thread could be on any list and therefore on an interrupt accessible 2075 /* Thread could be on any list and therefore on an interrupt accessible
1981 one - disable interrupts */ 2076 one - disable interrupts */
1982 int oldlevel = disable_irq_save(); 2077 const int oldlevel = disable_irq_save();
1983
1984 LOCK_THREAD(thread); 2078 LOCK_THREAD(thread);
1985 2079
1986 /* Make sure it's not killed */ 2080 if (thread->id != thread_id || thread->state == STATE_KILLED)
1987 if (thread->id == thread_id && thread->state != STATE_KILLED) 2081 goto done; /* Invalid thread */
1988 {
1989 int old_priority = thread->priority;
1990
1991 old_base_priority = thread->base_priority;
1992 thread->base_priority = priority;
1993
1994 prio_move_entry(&thread->pdist, old_base_priority, priority);
1995 priority = find_first_set_bit(thread->pdist.mask);
1996
1997 if (old_priority == priority)
1998 {
1999 /* No priority change - do nothing */
2000 }
2001 else if (thread->state == STATE_RUNNING)
2002 {
2003 /* This thread is running - change location on the run
2004 * queue. No transitive inheritance needed. */
2005 set_running_thread_priority(thread, priority);
2006 }
2007 else
2008 {
2009 thread->priority = priority;
2010
2011 if (thread->blocker != NULL)
2012 {
2013 /* Bubble new priority down the chain */
2014 struct blocker *bl = thread->blocker; /* Blocker struct */
2015 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2016 struct thread_entry * const tstart = thread; /* Initial thread */
2017 const int highest = MIN(priority, old_priority); /* Higher of new or old */
2018
2019 for (;;)
2020 {
2021 struct thread_entry *next; /* Next thread to check */
2022 int bl_pr; /* Highest blocked thread */
2023 int queue_pr; /* New highest blocked thread */
2024#if NUM_CORES > 1
2025 /* Owner can change but thread cannot be dislodged - thread
2026 * may not be the first in the queue which allows other
2027 * threads ahead in the list to be given ownership during the
2028 * operation. If thread is next then the waker will have to
2029 * wait for us and the owner of the object will remain fixed.
2030 * If we successfully grab the owner -- which at some point
2031 * is guaranteed -- then the queue remains fixed until we
2032 * pass by. */
2033 for (;;)
2034 {
2035 LOCK_THREAD(bl_t);
2036
2037 /* Double-check the owner - retry if it changed */
2038 if (LIKELY(bl->thread == bl_t))
2039 break;
2040
2041 UNLOCK_THREAD(bl_t);
2042 bl_t = bl->thread;
2043 }
2044#endif
2045 bl_pr = bl->priority;
2046
2047 if (highest > bl_pr)
2048 break; /* Object priority won't change */
2049 2082
2050 /* This will include the thread being set */ 2083 old_base_priority = thread->base_priority;
2051 queue_pr = find_highest_priority_in_list_l(*thread->bqp); 2084 if (priority == old_base_priority)
2085 goto done; /* No base priority change */
2052 2086
2053 if (queue_pr == bl_pr) 2087 thread->base_priority = priority;
2054 break; /* Object priority not changing */
2055 2088
2056 /* Update thread boost for this object */ 2089 /* Adjust the thread's priority influence on itself */
2057 bl->priority = queue_pr; 2090 prio_move_entry(&thread->pdist, old_base_priority, priority);
2058 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2059 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2060 2091
2061 if (bl_t->priority == bl_pr) 2092 int old_priority = thread->priority;
2062 break; /* Blocking thread priority not changing */ 2093 int new_priority = priobit_ffs(&thread->pdist.mask);
2063 2094
2064 if (bl_t->state == STATE_RUNNING) 2095 if (old_priority == new_priority)
2065 { 2096 goto done; /* No running priority change */
2066 /* Thread not blocked - we're done */
2067 set_running_thread_priority(bl_t, bl_pr);
2068 break;
2069 }
2070 2097
2071 bl_t->priority = bl_pr; 2098 if (thread->state == STATE_RUNNING)
2072 bl = bl_t->blocker; /* Blocking thread has a blocker? */ 2099 {
2100 /* This thread is running - just change location on the run queue.
2101 Also sets thread->priority. */
2102 set_running_thread_priority(thread, new_priority);
2103 goto done;
2104 }
2073 2105
2074 if (bl == NULL) 2106 /* Thread is blocked */
2075 break; /* End of chain */ 2107 struct blocker *bl = thread->blocker;
2108 if (bl == NULL)
2109 {
2110 thread->priority = new_priority;
2111 goto done; /* End of transitive blocks */
2112 }
2076 2113
2077 next = bl->thread; 2114 struct thread_entry *blt = lock_blocker_thread(bl);
2115 struct thread_entry **bqp = thread->bqp;
2078 2116
2079 if (UNLIKELY(next == tstart)) 2117 thread->priority = new_priority;
2080 break; /* Full-circle */
2081 2118
2082 UNLOCK_THREAD(thread); 2119 UNLOCK_THREAD(thread);
2120 thread = NULL;
2083 2121
2084 thread = bl_t; 2122 int oldblpr = bl->priority;
2085 bl_t = next; 2123 int newblpr = oldblpr;
2086 } /* for (;;) */ 2124 if (new_priority < oldblpr)
2125 newblpr = new_priority;
2126 else if (old_priority <= oldblpr)
2127 newblpr = find_highest_priority_in_list_l(*bqp);
2087 2128
2088 UNLOCK_THREAD(bl_t); 2129 if (newblpr == oldblpr)
2089 } 2130 {
2090 } 2131 unlock_blocker_thread(bl);
2132 goto done;
2091 } 2133 }
2092 2134
2093 UNLOCK_THREAD(thread); 2135 inherit_priority(bl, bl, blt, newblpr);
2094 2136done:
2137 if (thread)
2138 UNLOCK_THREAD(thread);
2095 restore_irq(oldlevel); 2139 restore_irq(oldlevel);
2096
2097 return old_base_priority; 2140 return old_base_priority;
2098} 2141}
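
The contract after the rewrite, sketched: the return value is the previous base priority, or -1 for an out-of-range argument, a stale ID, or a killed thread. Here tid and do_urgent_work() are hypothetical, and PRIORITY_REALTIME is assumed to be one of the PRIORITY_* constants from thread.h:

    int old = thread_set_priority(tid, PRIORITY_REALTIME);
    if (old >= 0)
    {
        do_urgent_work();
        thread_set_priority(tid, old); /* restore the saved base priority */
    }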
2099 2142
diff --git a/firmware/target/hosted/sdl/thread-sdl.c b/firmware/target/hosted/sdl/thread-sdl.c
index c17e793833..eaf59e245d 100644
--- a/firmware/target/hosted/sdl/thread-sdl.c
+++ b/firmware/target/hosted/sdl/thread-sdl.c
@@ -406,20 +406,20 @@ void sleep_thread(int ticks)
406 current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem; 406 current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
407} 407}
408 408
409void block_thread(struct thread_entry *current) 409void block_thread(struct thread_entry *current, int ticks)
410{ 410{
411 current->state = STATE_BLOCKED; 411 if (ticks < 0)
412 add_to_list_l(current->bqp, current); 412 current->state = STATE_BLOCKED;
413} 413 else
414 {
415 current->state = STATE_BLOCKED_W_TMO;
416 current->tmo_tick = (1000/HZ)*ticks;
417 }
414 418
415void block_thread_w_tmo(struct thread_entry *current, int ticks)
416{
417 current->state = STATE_BLOCKED_W_TMO;
418 current->tmo_tick = (1000/HZ)*ticks;
419 add_to_list_l(current->bqp, current); 419 add_to_list_l(current->bqp, current);
420} 420}
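
On the hosted SDL target tmo_tick is kept in milliseconds, hence the (1000/HZ) scaling, but the TIMEOUT_BLOCK convention matches the native side. With HZ == 100:

    block_thread(current, TIMEOUT_BLOCK); /* STATE_BLOCKED, waits forever */
    block_thread(current, HZ);            /* STATE_BLOCKED_W_TMO, tmo_tick = 1000 ms */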
421 421
422unsigned int wakeup_thread(struct thread_entry **list) 422unsigned int wakeup_thread_(struct thread_entry **list)
423{ 423{
424 struct thread_entry *thread = *list; 424 struct thread_entry *thread = *list;
425 425
@@ -439,20 +439,26 @@ unsigned int wakeup_thread(struct thread_entry **list)
439 return THREAD_NONE; 439 return THREAD_NONE;
440} 440}
441 441
442unsigned int thread_queue_wake(struct thread_entry **list) 442unsigned int thread_queue_wake(struct thread_entry **list,
443 volatile int *count)
443{ 444{
444 unsigned int result = THREAD_NONE; 445 unsigned int result = THREAD_NONE;
446 int num = 0;
445 447
446 for (;;) 448 for (;;)
447 { 449 {
448 unsigned int rc = wakeup_thread(list); 450 unsigned int rc = wakeup_thread_(list);
449 451
450 if (rc == THREAD_NONE) 452 if (rc == THREAD_NONE)
451 break; 453 break;
452 454
453 result |= rc; 455 result |= rc;
456 num++;
454 } 457 }
455 458
459 if (count)
460 *count = num;
461
456 return result; 462 return result;
457} 463}
458 464
@@ -615,7 +621,7 @@ void remove_thread(unsigned int thread_id)
615 621
616 new_thread_id(thread->id, thread); 622 new_thread_id(thread->id, thread);
617 thread->state = STATE_KILLED; 623 thread->state = STATE_KILLED;
618 thread_queue_wake(&thread->queue); 624 thread_queue_wake(&thread->queue, NULL);
619 625
620 SDL_DestroySemaphore(s); 626 SDL_DestroySemaphore(s);
621 627
@@ -652,7 +658,7 @@ void thread_wait(unsigned int thread_id)
652 if (thread->id == thread_id && thread->state != STATE_KILLED) 658 if (thread->id == thread_id && thread->state != STATE_KILLED)
653 { 659 {
654 current->bqp = &thread->queue; 660 current->bqp = &thread->queue;
655 block_thread(current); 661 block_thread(current, TIMEOUT_BLOCK);
656 switch_thread(); 662 switch_thread();
657 } 663 }
658} 664}