Diffstat (limited to 'firmware/kernel')
-rw-r--r--  firmware/kernel/include/mrsw_lock.h  |    7
-rw-r--r--  firmware/kernel/include/mutex.h      |   12
-rw-r--r--  firmware/kernel/include/queue.h      |    4
-rw-r--r--  firmware/kernel/include/semaphore.h  |    8
-rw-r--r--  firmware/kernel/include/thread.h     |   18
-rw-r--r--  firmware/kernel/mrsw_lock.c          |  103
-rw-r--r--  firmware/kernel/mutex.c              |   28
-rw-r--r--  firmware/kernel/pthread/thread.c     |   81
-rw-r--r--  firmware/kernel/queue.c              |  145
-rw-r--r--  firmware/kernel/semaphore.c          |   59
-rw-r--r--  firmware/kernel/thread-common.c      |  247
-rw-r--r--  firmware/kernel/thread-internal.h    |  407
-rw-r--r--  firmware/kernel/thread.c             | 1837
13 files changed, 1327 insertions, 1629 deletions
diff --git a/firmware/kernel/include/mrsw_lock.h b/firmware/kernel/include/mrsw_lock.h
index d919f7be26..7511f87e93 100644
--- a/firmware/kernel/include/mrsw_lock.h
+++ b/firmware/kernel/include/mrsw_lock.h
@@ -39,10 +39,9 @@
39 | */ | 39 | */ |
40 | struct mrsw_lock | 40 | struct mrsw_lock |
41 | { | 41 | { |
42 | int volatile count; /* rd/wr counter; >0 = reader(s), <0 = writer */ | 42 | int volatile count; /* counter; >0 = reader(s), <0 = writer */ |
43 | struct thread_entry *queue; | 43 | struct __wait_queue queue; /* waiter list */ |
44 | struct blocker_splay splay; /* priority inheritance info | 44 | struct blocker_splay splay; /* priority inheritance/owner info */ |
45 | for waiters */ | ||
46 | uint8_t rdrecursion[MAXTHREADS]; /* per-thread reader recursion counts */ | 45 | uint8_t rdrecursion[MAXTHREADS]; /* per-thread reader recursion counts */ |
47 | IF_COP( struct corelock cl; ) | 46 | IF_COP( struct corelock cl; ) |
48 | }; | 47 | }; |
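The header now embeds the waiter list (struct __wait_queue) directly and keeps owner/priority-inheritance state in the blocker splay, so the count field alone encodes who holds the lock. A tiny sketch restating that encoding; these helper functions are hypothetical and not part of the change:

    /* Illustrative only -- restates the documented meaning of mrsw_lock::count. */
    static inline bool mrsw_is_free(const struct mrsw_lock *mrsw)
    {
        return mrsw->count == 0;    /* no readers, no writer */
    }

    static inline bool mrsw_held_by_readers(const struct mrsw_lock *mrsw)
    {
        return mrsw->count > 0;     /* count = number of readers holding it */
    }

    static inline bool mrsw_held_by_writer(const struct mrsw_lock *mrsw)
    {
        return mrsw->count < 0;     /* a single writer owns the lock */
    }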
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
index 72736ec8fd..b74bfe23f5 100644
--- a/firmware/kernel/include/mutex.h
+++ b/firmware/kernel/include/mutex.h
@@ -26,13 +26,13 @@
26 | 26 | ||
27 | struct mutex | 27 | struct mutex |
28 | { | 28 | { |
29 | struct thread_entry *queue; /* waiter list */ | 29 | struct __wait_queue queue; /* waiter list */ |
30 | int recursion; /* lock owner recursion count */ | 30 | int recursion; /* lock owner recursion count */ |
31 | struct blocker blocker; /* priority inheritance info | 31 | struct blocker blocker; /* priority inheritance info |
32 | for waiters and owner*/ | 32 | for waiters and owner*/ |
33 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | 33 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ |
34 | #ifdef HAVE_PRIORITY_SCHEDULING | 34 | #ifdef HAVE_PRIORITY_SCHEDULING |
35 | bool no_preempt; | 35 | bool no_preempt; |
36 | #endif | 36 | #endif |
37 | }; | 37 | }; |
38 | 38 | ||
diff --git a/firmware/kernel/include/queue.h b/firmware/kernel/include/queue.h
index 3f24598d5b..afee4c90ff 100644
--- a/firmware/kernel/include/queue.h
+++ b/firmware/kernel/include/queue.h
@@ -88,7 +88,7 @@ struct queue_sender_list
88 | /* If non-NULL, there is a thread waiting for the corresponding event */ | 88 | /* If non-NULL, there is a thread waiting for the corresponding event */ |
89 | /* Must be statically allocated to put in non-cached ram. */ | 89 | /* Must be statically allocated to put in non-cached ram. */ |
90 | struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */ | 90 | struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */ |
91 | struct thread_entry *list; /* list of senders in map */ | 91 | struct __wait_queue list; /* list of senders in map */ |
92 | /* Send info for last message dequeued or NULL if replied or not sent */ | 92 | /* Send info for last message dequeued or NULL if replied or not sent */ |
93 | struct thread_entry * volatile curr_sender; | 93 | struct thread_entry * volatile curr_sender; |
94 | #ifdef HAVE_PRIORITY_SCHEDULING | 94 | #ifdef HAVE_PRIORITY_SCHEDULING |
@@ -108,7 +108,7 @@ struct queue_sender_list | |||
108 | 108 | ||
109 | struct event_queue | 109 | struct event_queue |
110 | { | 110 | { |
111 | struct thread_entry *queue; /* waiter list */ | 111 | struct __wait_queue queue; /* waiter list */ |
112 | struct queue_event events[QUEUE_LENGTH]; /* list of events */ | 112 | struct queue_event events[QUEUE_LENGTH]; /* list of events */ |
113 | unsigned int volatile read; /* head of queue */ | 113 | unsigned int volatile read; /* head of queue */ |
114 | unsigned int volatile write; /* tail of queue */ | 114 | unsigned int volatile write; /* tail of queue */ |
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h
index 16095d9c2d..1d604a4e76 100644
--- a/firmware/kernel/include/semaphore.h
+++ b/firmware/kernel/include/semaphore.h
@@ -26,10 +26,10 @@
26 | 26 | ||
27 | struct semaphore | 27 | struct semaphore |
28 | { | 28 | { |
29 | struct thread_entry *queue; /* Waiter list */ | 29 | struct __wait_queue queue; /* Waiter list */ |
30 | int volatile count; /* # of waits remaining before unsignaled */ | 30 | int volatile count; /* # of waits remaining before unsignaled */ |
31 | int max; /* maximum # of waits to remain signaled */ | 31 | int max; /* maximum # of waits to remain signaled */ |
32 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | 32 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ |
33 | }; | 33 | }; |
34 | 34 | ||
35 | extern void semaphore_init(struct semaphore *s, int max, int start); | 35 | extern void semaphore_init(struct semaphore *s, int max, int start); |
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
index 5a8bff0107..dfb632785e 100644
--- a/firmware/kernel/include/thread.h
+++ b/firmware/kernel/include/thread.h
@@ -26,6 +26,7 @@
26 | #include <stdbool.h> | 26 | #include <stdbool.h> |
27 | #include "config.h" | 27 | #include "config.h" |
28 | #include "gcc_extensions.h" | 28 | #include "gcc_extensions.h" |
29 | #include "linked_list.h" | ||
29 | #include "bitarray.h" | 30 | #include "bitarray.h" |
30 | #include "corelock.h" | 31 | #include "corelock.h" |
31 | 32 | ||
@@ -52,7 +53,7 @@ | |||
52 | #define PRIORITY_REALTIME_4 4 | 53 | #define PRIORITY_REALTIME_4 4 |
53 | #define PRIORITY_REALTIME 4 /* Lowest realtime range */ | 54 | #define PRIORITY_REALTIME 4 /* Lowest realtime range */ |
54 | #define PRIORITY_BUFFERING 15 /* Codec buffering thread */ | 55 | #define PRIORITY_BUFFERING 15 /* Codec buffering thread */ |
55 | #define PRIORITY_USER_INTERFACE 16 /* The main thread */ | 56 | #define PRIORITY_USER_INTERFACE 16 /* For most UI threads */ |
56 | #define PRIORITY_RECORDING 16 /* Recording thread */ | 57 | #define PRIORITY_RECORDING 16 /* Recording thread */ |
57 | #define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */ | 58 | #define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */ |
58 | #define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */ | 59 | #define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */ |
@@ -61,6 +62,7 @@ | |||
61 | #define NUM_PRIORITIES 32 | 62 | #define NUM_PRIORITIES 32 |
62 | #define PRIORITY_IDLE 32 /* Priority representative of no tasks */ | 63 | #define PRIORITY_IDLE 32 /* Priority representative of no tasks */ |
63 | 64 | ||
65 | #define PRIORITY_MAIN_THREAD PRIORITY_USER_INTERFACE | ||
64 | #define IO_PRIORITY_IMMEDIATE 0 | 66 | #define IO_PRIORITY_IMMEDIATE 0 |
65 | #define IO_PRIORITY_BACKGROUND 32 | 67 | #define IO_PRIORITY_BACKGROUND 32 |
66 | 68 | ||
@@ -108,6 +110,9 @@ extern unsigned sleep(unsigned ticks); | |||
108 | #define IFN_PRIO(...) __VA_ARGS__ | 110 | #define IFN_PRIO(...) __VA_ARGS__ |
109 | #endif | 111 | #endif |
110 | 112 | ||
113 | #define __wait_queue lld_head | ||
114 | #define __wait_queue_node lld_node | ||
115 | |||
111 | /* Basic structure describing the owner of an object */ | 116 | /* Basic structure describing the owner of an object */ |
112 | struct blocker | 117 | struct blocker |
113 | { | 118 | { |
@@ -168,6 +173,7 @@ int thread_get_priority(unsigned int thread_id); | |||
168 | void thread_set_io_priority(unsigned int thread_id, int io_priority); | 173 | void thread_set_io_priority(unsigned int thread_id, int io_priority); |
169 | int thread_get_io_priority(unsigned int thread_id); | 174 | int thread_get_io_priority(unsigned int thread_id); |
170 | #endif /* HAVE_IO_PRIORITY */ | 175 | #endif /* HAVE_IO_PRIORITY */ |
176 | |||
171 | #if NUM_CORES > 1 | 177 | #if NUM_CORES > 1 |
172 | unsigned int switch_core(unsigned int new_core); | 178 | unsigned int switch_core(unsigned int new_core); |
173 | #endif | 179 | #endif |
@@ -186,11 +192,21 @@ int core_get_debug_info(unsigned int core, struct core_debug_info *infop); | |||
186 | 192 | ||
187 | #endif /* NUM_CORES */ | 193 | #endif /* NUM_CORES */ |
188 | 194 | ||
195 | #ifdef HAVE_SDL_THREADS | ||
196 | #define IF_SDL(x...) x | ||
197 | #define IFN_SDL(x...) | ||
198 | #else | ||
199 | #define IF_SDL(x...) | ||
200 | #define IFN_SDL(x...) x | ||
201 | #endif | ||
202 | |||
189 | struct thread_debug_info | 203 | struct thread_debug_info |
190 | { | 204 | { |
191 | char statusstr[4]; | 205 | char statusstr[4]; |
192 | char name[32]; | 206 | char name[32]; |
207 | #ifndef HAVE_SDL_THREADS | ||
193 | unsigned int stack_usage; | 208 | unsigned int stack_usage; |
209 | #endif | ||
194 | #if NUM_CORES > 1 | 210 | #if NUM_CORES > 1 |
195 | unsigned int core; | 211 | unsigned int core; |
196 | #endif | 212 | #endif |
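With __wait_queue/__wait_queue_node aliased to the generic lld_head/lld_node list types, kernel objects embed their waiter list instead of holding a bare struct thread_entry * head. A rough sketch of the resulting declare/initialize pattern, mirroring the mutex code further down; the object name is made up:

    #include "kernel-internal.h"   /* wait_queue_init, blocker_init, corelock_* */

    /* hypothetical kernel object following the new pattern */
    struct my_waitable
    {
        struct __wait_queue queue;      /* waiter list (an lld_head) */
        struct blocker blocker;         /* owner + priority inheritance info */
        IF_COP( struct corelock cl; )   /* multiprocessor sync */
    };

    static void my_waitable_init(struct my_waitable *w)
    {
        wait_queue_init(&w->queue);     /* empty doubly-linked waiter list */
        blocker_init(&w->blocker);      /* no owner yet */
        corelock_init(&w->cl);          /* no-op on single-core builds */
    }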
diff --git a/firmware/kernel/mrsw_lock.c b/firmware/kernel/mrsw_lock.c
index 45c8801b74..b683f63d5f 100644
--- a/firmware/kernel/mrsw_lock.c
+++ b/firmware/kernel/mrsw_lock.c
@@ -19,7 +19,8 @@
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | #include "kernel-internal.h" | 21 | #include "kernel-internal.h" |
22 | #include "mrsw-lock.h" | 22 | #include <string.h> |
23 | #include "mrsw_lock.h" | ||
23 | 24 | ||
24 | #ifdef HAVE_PRIORITY_SCHEDULING | 25 | #ifdef HAVE_PRIORITY_SCHEDULING |
25 | 26 | ||
@@ -34,13 +35,14 @@ mrsw_reader_claim(struct mrsw_lock *mrsw, struct thread_entry *current, | |||
34 | 35 | ||
35 | static FORCE_INLINE void | 36 | static FORCE_INLINE void |
36 | mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current, | 37 | mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current, |
37 | int count, unsigned int slotnum) | 38 | struct thread_entry *first, int count, |
39 | unsigned int slotnum) | ||
38 | { | 40 | { |
39 | /* If no writer is queued or has ownership then noone is queued; | 41 | /* If no writer is queued or has ownership then noone is queued; |
40 | if a writer owns it, then the reader would be blocked instead. | 42 | if a writer owns it, then the reader would be blocked instead. |
41 | Therefore, if the queue has threads, then the next after the | 43 | Therefore, if the queue has threads, then the next after the |
42 | owning readers is a writer and this is not the last reader. */ | 44 | owning readers is a writer and this is not the last reader. */ |
43 | if (mrsw->queue) | 45 | if (first) |
44 | corelock_lock(&mrsw->splay.cl); | 46 | corelock_lock(&mrsw->splay.cl); |
45 | 47 | ||
46 | threadbit_clear_bit(&mrsw->splay.mask, slotnum); | 48 | threadbit_clear_bit(&mrsw->splay.mask, slotnum); |
@@ -61,10 +63,10 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current, | |||
61 | threadbit_popcount(&mrsw->splay.mask)); | 63 | threadbit_popcount(&mrsw->splay.mask)); |
62 | /* switch owner to sole remaining reader */ | 64 | /* switch owner to sole remaining reader */ |
63 | slotnum = threadbit_ffs(&mrsw->splay.mask); | 65 | slotnum = threadbit_ffs(&mrsw->splay.mask); |
64 | mrsw->splay.blocker.thread = thread_id_entry(slotnum); | 66 | mrsw->splay.blocker.thread = __thread_slot_entry(slotnum); |
65 | } | 67 | } |
66 | 68 | ||
67 | if (mrsw->queue) | 69 | if (first) |
68 | { | 70 | { |
69 | priority_disinherit(current, &mrsw->splay.blocker); | 71 | priority_disinherit(current, &mrsw->splay.blocker); |
70 | corelock_unlock(&mrsw->splay.cl); | 72 | corelock_unlock(&mrsw->splay.cl); |
@@ -72,23 +74,25 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current, | |||
72 | } | 74 | } |
73 | 75 | ||
74 | static FORCE_INLINE unsigned int | 76 | static FORCE_INLINE unsigned int |
75 | mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, unsigned int slotnum) | 77 | mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread, |
78 | unsigned int slotnum) | ||
76 | { | 79 | { |
77 | threadbit_clear_bit(&mrsw->splay.mask, slotnum); | 80 | threadbit_clear_bit(&mrsw->splay.mask, slotnum); |
78 | return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER); | 81 | return wakeup_thread(thread, WAKEUP_TRANSFER); |
79 | } | 82 | } |
80 | 83 | ||
81 | static FORCE_INLINE unsigned int | 84 | static FORCE_INLINE unsigned int |
82 | mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw) | 85 | mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread) |
83 | { | 86 | { |
84 | return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER); | 87 | return wakeup_thread(thread, WAKEUP_TRANSFER); |
88 | (void)mrsw; | ||
85 | } | 89 | } |
86 | 90 | ||
87 | static FORCE_INLINE unsigned int | 91 | static FORCE_INLINE unsigned int |
88 | mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) | 92 | mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first) |
89 | { | 93 | { |
90 | unsigned int result = wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER_MULTI); | 94 | unsigned int result = wakeup_thread(first, WAKEUP_TRANSFER_MULTI); |
91 | mrsw->count = thread_self_entry()->retval; | 95 | mrsw->count = __running_self_entry()->retval; |
92 | return result; | 96 | return result; |
93 | } | 97 | } |
94 | 98 | ||
@@ -97,32 +101,36 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) | |||
97 | #define mrsw_reader_claim(mrsw, current, count, slotnum) \ | 101 | #define mrsw_reader_claim(mrsw, current, count, slotnum) \ |
98 | do {} while (0) | 102 | do {} while (0) |
99 | 103 | ||
100 | #define mrsw_reader_relinquish(mrsw, current, count, slotnum) \ | 104 | #define mrsw_reader_relinquish(mrsw, current, first, count, slotnum) \ |
101 | do {} while (0) | 105 | do {} while (0) |
102 | 106 | ||
103 | static FORCE_INLINE unsigned int | 107 | static FORCE_INLINE unsigned int |
104 | mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw) | 108 | mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread) |
105 | { | 109 | { |
106 | mrsw->splay.blocker.thread = mrsw->queue; | 110 | mrsw->splay.blocker.thread = thread; |
107 | return wakeup_thread(&mrsw->queue); | 111 | return wakeup_thread(thread); |
108 | } | 112 | } |
109 | 113 | ||
110 | static FORCE_INLINE unsigned int | 114 | static FORCE_INLINE unsigned int |
111 | mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw) | 115 | mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread) |
112 | { | 116 | { |
113 | mrsw->splay.blocker.thread = mrsw->queue; | 117 | mrsw->splay.blocker.thread = thread; |
114 | return wakeup_thread(&mrsw->queue); | 118 | return wakeup_thread(thread); |
115 | } | 119 | } |
116 | 120 | ||
117 | static FORCE_INLINE unsigned int | 121 | static FORCE_INLINE unsigned int |
118 | mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) | 122 | mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first) |
119 | { | 123 | { |
120 | mrsw->splay.blocker.thread = NULL; | 124 | mrsw->splay.blocker.thread = NULL; |
121 | int count = 0; | 125 | int count = 1; |
122 | 126 | ||
123 | while (mrsw->queue && mrsw->queue->retval != 0) | 127 | while (1) |
124 | { | 128 | { |
125 | wakeup_thread(&mrsw->queue); | 129 | wakeup_thread(first); |
130 | |||
131 | if (!(first = WQ_THREAD_FIRST(&mrsw->queue)) || first->retval == 0) | ||
132 | break; | ||
133 | |||
126 | count++; | 134 | count++; |
127 | } | 135 | } |
128 | 136 | ||
@@ -138,14 +146,11 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) | |||
138 | void mrsw_init(struct mrsw_lock *mrsw) | 146 | void mrsw_init(struct mrsw_lock *mrsw) |
139 | { | 147 | { |
140 | mrsw->count = 0; | 148 | mrsw->count = 0; |
141 | mrsw->queue = NULL; | 149 | wait_queue_init(&mrsw->queue); |
142 | mrsw->splay.blocker.thread = NULL; | 150 | blocker_splay_init(&mrsw->splay); |
143 | #ifdef HAVE_PRIORITY_SCHEDULING | 151 | #ifdef HAVE_PRIORITY_SCHEDULING |
144 | mrsw->splay.blocker.priority = PRIORITY_IDLE; | ||
145 | threadbit_clear(&mrsw->splay.mask); | ||
146 | corelock_init(&mrsw->splay.cl); | ||
147 | memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion)); | 152 | memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion)); |
148 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 153 | #endif |
149 | corelock_init(&mrsw->cl); | 154 | corelock_init(&mrsw->cl); |
150 | } | 155 | } |
151 | 156 | ||
@@ -154,7 +159,7 @@ void mrsw_init(struct mrsw_lock *mrsw) | |||
154 | * access recursively. The current writer is ignored and gets access. */ | 159 | * access recursively. The current writer is ignored and gets access. */ |
155 | void mrsw_read_acquire(struct mrsw_lock *mrsw) | 160 | void mrsw_read_acquire(struct mrsw_lock *mrsw) |
156 | { | 161 | { |
157 | struct thread_entry *current = thread_self_entry(); | 162 | struct thread_entry *current = __running_self_entry(); |
158 | 163 | ||
159 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) | 164 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) |
160 | return; /* Read request while holding write access; pass */ | 165 | return; /* Read request while holding write access; pass */ |
@@ -178,7 +183,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw) | |||
178 | 183 | ||
179 | int count = mrsw->count; | 184 | int count = mrsw->count; |
180 | 185 | ||
181 | if (LIKELY(count >= 0 && !mrsw->queue)) | 186 | if (LIKELY(count >= 0 && mrsw->queue.head == NULL)) |
182 | { | 187 | { |
183 | /* Lock open to readers: | 188 | /* Lock open to readers: |
184 | IFN_PRIO, mrsw->count tracks reader recursion */ | 189 | IFN_PRIO, mrsw->count tracks reader recursion */ |
@@ -189,13 +194,10 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw) | |||
189 | } | 194 | } |
190 | 195 | ||
191 | /* A writer owns it or is waiting; block... */ | 196 | /* A writer owns it or is waiting; block... */ |
192 | IF_COP( current->obj_cl = &mrsw->cl; ) | ||
193 | IF_PRIO( current->blocker = &mrsw->splay.blocker; ) | ||
194 | current->bqp = &mrsw->queue; | ||
195 | current->retval = 1; /* indicate multi-wake candidate */ | 197 | current->retval = 1; /* indicate multi-wake candidate */ |
196 | 198 | ||
197 | disable_irq(); | 199 | disable_irq(); |
198 | block_thread(current, TIMEOUT_BLOCK); | 200 | block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker); |
199 | 201 | ||
200 | corelock_unlock(&mrsw->cl); | 202 | corelock_unlock(&mrsw->cl); |
201 | 203 | ||
@@ -207,7 +209,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw) | |||
207 | * leave opens up access to writer threads. The current writer is ignored. */ | 209 | * leave opens up access to writer threads. The current writer is ignored. */ |
208 | void mrsw_read_release(struct mrsw_lock *mrsw) | 210 | void mrsw_read_release(struct mrsw_lock *mrsw) |
209 | { | 211 | { |
210 | struct thread_entry *current = thread_self_entry(); | 212 | struct thread_entry *current = __running_self_entry(); |
211 | 213 | ||
212 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) | 214 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) |
213 | return; /* Read release while holding write access; ignore */ | 215 | return; /* Read release while holding write access; ignore */ |
@@ -237,17 +239,18 @@ void mrsw_read_release(struct mrsw_lock *mrsw) | |||
237 | unsigned int result = THREAD_NONE; | 239 | unsigned int result = THREAD_NONE; |
238 | const int oldlevel = disable_irq_save(); | 240 | const int oldlevel = disable_irq_save(); |
239 | 241 | ||
240 | if (--count == 0 && mrsw->queue) | 242 | struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue); |
243 | if (--count == 0 && thread != NULL) | ||
241 | { | 244 | { |
242 | /* No readers remain and a writer is waiting */ | 245 | /* No readers remain and a writer is waiting */ |
243 | mrsw->count = -1; | 246 | mrsw->count = -1; |
244 | result = mrsw_reader_wakeup_writer(mrsw IF_PRIO(, slotnum)); | 247 | result = mrsw_reader_wakeup_writer(mrsw, thread IF_PRIO(, slotnum)); |
245 | } | 248 | } |
246 | else | 249 | else |
247 | { | 250 | { |
248 | /* Giving up readership; we may be the last, or not */ | 251 | /* Giving up readership; we may be the last, or not */ |
249 | mrsw->count = count; | 252 | mrsw->count = count; |
250 | mrsw_reader_relinquish(mrsw, current, count, slotnum); | 253 | mrsw_reader_relinquish(mrsw, current, thread, count, slotnum); |
251 | } | 254 | } |
252 | 255 | ||
253 | restore_irq(oldlevel); | 256 | restore_irq(oldlevel); |
@@ -265,7 +268,7 @@ void mrsw_read_release(struct mrsw_lock *mrsw) | |||
265 | * safely call recursively. */ | 268 | * safely call recursively. */ |
266 | void mrsw_write_acquire(struct mrsw_lock *mrsw) | 269 | void mrsw_write_acquire(struct mrsw_lock *mrsw) |
267 | { | 270 | { |
268 | struct thread_entry *current = thread_self_entry(); | 271 | struct thread_entry *current = __running_self_entry(); |
269 | 272 | ||
270 | if (current == mrsw->splay.blocker.thread) | 273 | if (current == mrsw->splay.blocker.thread) |
271 | { | 274 | { |
@@ -288,13 +291,10 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw) | |||
288 | } | 291 | } |
289 | 292 | ||
290 | /* Readers present or a writer owns it - block... */ | 293 | /* Readers present or a writer owns it - block... */ |
291 | IF_COP( current->obj_cl = &mrsw->cl; ) | ||
292 | IF_PRIO( current->blocker = &mrsw->splay.blocker; ) | ||
293 | current->bqp = &mrsw->queue; | ||
294 | current->retval = 0; /* indicate single-wake candidate */ | 294 | current->retval = 0; /* indicate single-wake candidate */ |
295 | 295 | ||
296 | disable_irq(); | 296 | disable_irq(); |
297 | block_thread(current, TIMEOUT_BLOCK); | 297 | block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker); |
298 | 298 | ||
299 | corelock_unlock(&mrsw->cl); | 299 | corelock_unlock(&mrsw->cl); |
300 | 300 | ||
@@ -305,9 +305,9 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw) | |||
305 | /* Release writer thread lock and open the lock to readers and writers */ | 305 | /* Release writer thread lock and open the lock to readers and writers */ |
306 | void mrsw_write_release(struct mrsw_lock *mrsw) | 306 | void mrsw_write_release(struct mrsw_lock *mrsw) |
307 | { | 307 | { |
308 | KERNEL_ASSERT(thread_self_entry() == mrsw->splay.blocker.thread, | 308 | KERNEL_ASSERT(__running_self_entry() == mrsw->splay.blocker.thread, |
309 | "mrsw_write_release->wrong thread (%s != %s)\n", | 309 | "mrsw_write_release->wrong thread (%s != %s)\n", |
310 | thread_self_entry()->name, | 310 | __running_self_entry()->name, |
311 | mrsw->splay.blocker.thread->name); | 311 | mrsw->splay.blocker.thread->name); |
312 | 312 | ||
313 | int count = mrsw->count; | 313 | int count = mrsw->count; |
@@ -323,15 +323,16 @@ void mrsw_write_release(struct mrsw_lock *mrsw) | |||
323 | corelock_lock(&mrsw->cl); | 323 | corelock_lock(&mrsw->cl); |
324 | const int oldlevel = disable_irq_save(); | 324 | const int oldlevel = disable_irq_save(); |
325 | 325 | ||
326 | if (mrsw->queue == NULL) /* 'count' becomes zero */ | 326 | struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue); |
327 | if (thread == NULL) /* 'count' becomes zero */ | ||
327 | { | 328 | { |
328 | mrsw->splay.blocker.thread = NULL; | 329 | mrsw->splay.blocker.thread = NULL; |
329 | mrsw->count = 0; | 330 | mrsw->count = 0; |
330 | } | 331 | } |
331 | else if (mrsw->queue->retval == 0) /* 'count' stays -1 */ | 332 | else if (thread->retval == 0) /* 'count' stays -1 */ |
332 | result = mrsw_writer_wakeup_writer(mrsw); | 333 | result = mrsw_writer_wakeup_writer(mrsw, thread); |
333 | else /* 'count' becomes # of readers */ | 334 | else /* 'count' becomes # of readers */ |
334 | result = mrsw_writer_wakeup_readers(mrsw); | 335 | result = mrsw_writer_wakeup_readers(mrsw, thread); |
335 | 336 | ||
336 | restore_irq(oldlevel); | 337 | restore_irq(oldlevel); |
337 | corelock_unlock(&mrsw->cl); | 338 | corelock_unlock(&mrsw->cl); |
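The client-side contract of these functions is unchanged by the rework: any number of readers may hold the lock concurrently, a writer gets exclusive access, and the current writer may re-enter either path. A minimal usage sketch; the lock and data names are invented:

    #include "mrsw_lock.h"

    static struct mrsw_lock cache_lock;
    static int cache_value;

    void cache_setup(void)
    {
        mrsw_init(&cache_lock);
    }

    int cache_read(void)              /* many readers may run concurrently */
    {
        mrsw_read_acquire(&cache_lock);
        int v = cache_value;
        mrsw_read_release(&cache_lock);
        return v;
    }

    void cache_write(int v)           /* a writer gets exclusive access */
    {
        mrsw_write_acquire(&cache_lock);
        cache_value = v;
        mrsw_write_release(&cache_lock);
    }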
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index e5729dc893..fc49cc6d09 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -30,20 +30,19 @@
30 | * the object is available to other threads */ | 30 | * the object is available to other threads */ |
31 | void mutex_init(struct mutex *m) | 31 | void mutex_init(struct mutex *m) |
32 | { | 32 | { |
33 | corelock_init(&m->cl); | 33 | wait_queue_init(&m->queue); |
34 | m->queue = NULL; | ||
35 | m->recursion = 0; | 34 | m->recursion = 0; |
36 | m->blocker.thread = NULL; | 35 | blocker_init(&m->blocker); |
37 | #ifdef HAVE_PRIORITY_SCHEDULING | 36 | #ifdef HAVE_PRIORITY_SCHEDULING |
38 | m->blocker.priority = PRIORITY_IDLE; | ||
39 | m->no_preempt = false; | 37 | m->no_preempt = false; |
40 | #endif | 38 | #endif |
39 | corelock_init(&m->cl); | ||
41 | } | 40 | } |
42 | 41 | ||
43 | /* Gain ownership of a mutex object or block until it becomes free */ | 42 | /* Gain ownership of a mutex object or block until it becomes free */ |
44 | void mutex_lock(struct mutex *m) | 43 | void mutex_lock(struct mutex *m) |
45 | { | 44 | { |
46 | struct thread_entry *current = thread_self_entry(); | 45 | struct thread_entry *current = __running_self_entry(); |
47 | 46 | ||
48 | if(current == m->blocker.thread) | 47 | if(current == m->blocker.thread) |
49 | { | 48 | { |
@@ -65,12 +64,8 @@ void mutex_lock(struct mutex *m) | |||
65 | } | 64 | } |
66 | 65 | ||
67 | /* block until the lock is open... */ | 66 | /* block until the lock is open... */ |
68 | IF_COP( current->obj_cl = &m->cl; ) | ||
69 | IF_PRIO( current->blocker = &m->blocker; ) | ||
70 | current->bqp = &m->queue; | ||
71 | |||
72 | disable_irq(); | 67 | disable_irq(); |
73 | block_thread(current, TIMEOUT_BLOCK); | 68 | block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker); |
74 | 69 | ||
75 | corelock_unlock(&m->cl); | 70 | corelock_unlock(&m->cl); |
76 | 71 | ||
@@ -82,10 +77,10 @@ void mutex_lock(struct mutex *m) | |||
82 | void mutex_unlock(struct mutex *m) | 77 | void mutex_unlock(struct mutex *m) |
83 | { | 78 | { |
84 | /* unlocker not being the owner is an unlocking violation */ | 79 | /* unlocker not being the owner is an unlocking violation */ |
85 | KERNEL_ASSERT(m->blocker.thread == thread_self_entry(), | 80 | KERNEL_ASSERT(m->blocker.thread == __running_self_entry(), |
86 | "mutex_unlock->wrong thread (%s != %s)\n", | 81 | "mutex_unlock->wrong thread (%s != %s)\n", |
87 | m->blocker.thread->name, | 82 | m->blocker.thread->name, |
88 | thread_self_entry()->name); | 83 | __running_self_entry()->name); |
89 | 84 | ||
90 | if(m->recursion > 0) | 85 | if(m->recursion > 0) |
91 | { | 86 | { |
@@ -98,7 +93,8 @@ void mutex_unlock(struct mutex *m) | |||
98 | corelock_lock(&m->cl); | 93 | corelock_lock(&m->cl); |
99 | 94 | ||
100 | /* transfer to next queued thread if any */ | 95 | /* transfer to next queued thread if any */ |
101 | if(LIKELY(m->queue == NULL)) | 96 | struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue); |
97 | if(LIKELY(thread == NULL)) | ||
102 | { | 98 | { |
103 | /* no threads waiting - open the lock */ | 99 | /* no threads waiting - open the lock */ |
104 | m->blocker.thread = NULL; | 100 | m->blocker.thread = NULL; |
@@ -107,11 +103,7 @@ void mutex_unlock(struct mutex *m) | |||
107 | } | 103 | } |
108 | 104 | ||
109 | const int oldlevel = disable_irq_save(); | 105 | const int oldlevel = disable_irq_save(); |
110 | /* Tranfer of owning thread is handled in the wakeup protocol | 106 | unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER); |
111 | * if priorities are enabled otherwise just set it from the | ||
112 | * queue head. */ | ||
113 | IFN_PRIO( m->blocker.thread = m->queue; ) | ||
114 | unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER); | ||
115 | restore_irq(oldlevel); | 107 | restore_irq(oldlevel); |
116 | 108 | ||
117 | corelock_unlock(&m->cl); | 109 | corelock_unlock(&m->cl); |
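Only the slow paths changed here: block_thread() is now told the wait queue and blocker explicitly, and wakeup_thread() handles the ownership transfer. Caller code is unaffected; a small sketch under invented names:

    #include "mutex.h"

    struct item { struct item *next; int payload; };

    static struct mutex list_mutex;
    static struct item *list_head;

    void list_setup(void)
    {
        mutex_init(&list_mutex);
    }

    void list_push(struct item *it)
    {
        mutex_lock(&list_mutex);     /* blocks on list_mutex.queue if another
                                        thread owns the mutex */
        it->next = list_head;
        list_head = it;
        mutex_unlock(&list_mutex);   /* hands ownership to the first waiter,
                                        if any (WAKEUP_TRANSFER) */
    }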
diff --git a/firmware/kernel/pthread/thread.c b/firmware/kernel/pthread/thread.c
index 354a946698..71cbd1d136 100644
--- a/firmware/kernel/pthread/thread.c
+++ b/firmware/kernel/pthread/thread.c
@@ -3,8 +3,8 @@
3 | #include <errno.h> | 3 | #include <errno.h> |
4 | #include <pthread.h> | 4 | #include <pthread.h> |
5 | #include "/usr/include/semaphore.h" | 5 | #include "/usr/include/semaphore.h" |
6 | #include "thread-internal.h" | ||
6 | #include "kernel.h" | 7 | #include "kernel.h" |
7 | #include "thread.h" | ||
8 | 8 | ||
9 | #define NSEC_PER_SEC 1000000000L | 9 | #define NSEC_PER_SEC 1000000000L |
10 | static inline void timespec_add_ns(struct timespec *a, uint64_t ns) | 10 | static inline void timespec_add_ns(struct timespec *a, uint64_t ns) |
@@ -25,11 +25,6 @@ struct thread_init_data { | |||
25 | 25 | ||
26 | __thread struct thread_entry *_current; | 26 | __thread struct thread_entry *_current; |
27 | 27 | ||
28 | struct thread_entry* thread_self_entry(void) | ||
29 | { | ||
30 | return _current; | ||
31 | } | ||
32 | |||
33 | unsigned int thread_self(void) | 28 | unsigned int thread_self(void) |
34 | { | 29 | { |
35 | return (unsigned) pthread_self(); | 30 | return (unsigned) pthread_self(); |
@@ -70,12 +65,10 @@ static void *trampoline(void *arg) | |||
70 | if (data->start_frozen) | 65 | if (data->start_frozen) |
71 | { | 66 | { |
72 | struct corelock thaw_lock; | 67 | struct corelock thaw_lock; |
73 | struct thread_entry *queue = NULL; | ||
74 | corelock_init(&thaw_lock); | 68 | corelock_init(&thaw_lock); |
75 | corelock_lock(&thaw_lock); | 69 | corelock_lock(&thaw_lock); |
76 | 70 | ||
77 | _current->lock = &thaw_lock; | 71 | _current->lock = &thaw_lock; |
78 | _current->bqp = &queue; | ||
79 | sem_post(&data->init_sem); | 72 | sem_post(&data->init_sem); |
80 | block_thread_switch(_current, _current->lock); | 73 | block_thread_switch(_current, _current->lock); |
81 | _current->lock = NULL; | 74 | _current->lock = NULL; |
@@ -97,7 +90,7 @@ void thread_thaw(unsigned int thread_id) | |||
97 | if (e->lock) | 90 | if (e->lock) |
98 | { | 91 | { |
99 | corelock_lock(e->lock); | 92 | corelock_lock(e->lock); |
100 | wakeup_thread(e->bqp); | 93 | wakeup_thread(e); |
101 | corelock_unlock(e->lock); | 94 | corelock_unlock(e->lock); |
102 | } | 95 | } |
103 | /* else: no lock. must be running already */ | 96 | /* else: no lock. must be running already */ |
@@ -135,7 +128,7 @@ unsigned int create_thread(void (*function)(void), | |||
135 | data->entry = entry; | 128 | data->entry = entry; |
136 | pthread_cond_init(&entry->cond, NULL); | 129 | pthread_cond_init(&entry->cond, NULL); |
137 | entry->runnable = true; | 130 | entry->runnable = true; |
138 | entry->l = (struct thread_list) { NULL, NULL }; | 131 | |
139 | sem_init(&data->init_sem, 0, 0); | 132 | sem_init(&data->init_sem, 0, 0); |
140 | 133 | ||
141 | if (pthread_create(&retval, NULL, trampoline, data) < 0) | 134 | if (pthread_create(&retval, NULL, trampoline, data) < 0) |
@@ -153,58 +146,19 @@ unsigned int create_thread(void (*function)(void), | |||
153 | return retval; | 146 | return retval; |
154 | } | 147 | } |
155 | 148 | ||
156 | static void add_to_list_l(struct thread_entry **list, | ||
157 | struct thread_entry *thread) | ||
158 | { | ||
159 | if (*list == NULL) | ||
160 | { | ||
161 | /* Insert into unoccupied list */ | ||
162 | thread->l.next = thread; | ||
163 | thread->l.prev = thread; | ||
164 | *list = thread; | ||
165 | } | ||
166 | else | ||
167 | { | ||
168 | /* Insert last */ | ||
169 | thread->l.next = *list; | ||
170 | thread->l.prev = (*list)->l.prev; | ||
171 | thread->l.prev->l.next = thread; | ||
172 | (*list)->l.prev = thread; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static void remove_from_list_l(struct thread_entry **list, | ||
177 | struct thread_entry *thread) | ||
178 | { | ||
179 | if (thread == thread->l.next) | ||
180 | { | ||
181 | /* The only item */ | ||
182 | *list = NULL; | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | if (thread == *list) | ||
187 | { | ||
188 | /* List becomes next item */ | ||
189 | *list = thread->l.next; | ||
190 | } | ||
191 | |||
192 | /* Fix links to jump over the removed entry. */ | ||
193 | thread->l.prev->l.next = thread->l.next; | ||
194 | thread->l.next->l.prev = thread->l.prev; | ||
195 | } | ||
196 | |||
197 | /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point | 149 | /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point |
198 | * to a corelock instance, and this corelock must be held by the caller */ | 150 | * to a corelock instance, and this corelock must be held by the caller */ |
199 | void block_thread_switch(struct thread_entry *t, struct corelock *cl) | 151 | void block_thread_switch(struct thread_entry *t, struct corelock *cl) |
200 | { | 152 | { |
201 | t->runnable = false; | 153 | t->runnable = false; |
202 | add_to_list_l(t->bqp, t); | 154 | if (wait_queue_ptr(t)) |
155 | wait_queue_register(t); | ||
203 | while(!t->runnable) | 156 | while(!t->runnable) |
204 | pthread_cond_wait(&t->cond, &cl->mutex); | 157 | pthread_cond_wait(&t->cond, &cl->mutex); |
205 | } | 158 | } |
206 | 159 | ||
207 | void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl) | 160 | void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, |
161 | struct corelock *cl) | ||
208 | { | 162 | { |
209 | int err = 0; | 163 | int err = 0; |
210 | struct timespec ts; | 164 | struct timespec ts; |
@@ -213,30 +167,25 @@ void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corel | |||
213 | timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ)); | 167 | timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ)); |
214 | 168 | ||
215 | t->runnable = false; | 169 | t->runnable = false; |
216 | add_to_list_l(t->bqp, t); | 170 | wait_queue_register(t->wqp, t); |
217 | while(!t->runnable && !err) | 171 | while(!t->runnable && !err) |
218 | err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts); | 172 | err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts); |
219 | 173 | ||
220 | if (err == ETIMEDOUT) | 174 | if (err == ETIMEDOUT) |
221 | { /* the thread timed out and was not explicitely woken up. | 175 | { /* the thread timed out and was not explicitely woken up. |
222 | * we need to do this now to mark it runnable again */ | 176 | * we need to do this now to mark it runnable again */ |
223 | remove_from_list_l(t->bqp, t); | ||
224 | t->runnable = true; | 177 | t->runnable = true; |
225 | if (t->wakeup_ext_cb) | 178 | /* NOTE: objects do their own removal upon timer expiration */ |
226 | t->wakeup_ext_cb(t); | ||
227 | } | 179 | } |
228 | } | 180 | } |
229 | 181 | ||
230 | unsigned int wakeup_thread(struct thread_entry **list) | 182 | unsigned int wakeup_thread(struct thread_entry *t) |
231 | { | 183 | { |
232 | struct thread_entry *t = *list; | 184 | if (t->wqp) |
233 | if (t) | 185 | wait_queue_remove(t); |
234 | { | 186 | t->runnable = true; |
235 | remove_from_list_l(list, t); | 187 | pthread_cond_signal(&t->cond); |
236 | t->runnable = true; | 188 | return THREAD_OK; |
237 | pthread_cond_signal(&t->cond); | ||
238 | } | ||
239 | return THREAD_NONE; | ||
240 | } | 189 | } |
241 | 190 | ||
242 | 191 | ||
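In the hosted (pthread) backend the ad-hoc l/bqp lists are gone: a blocked thread is registered on whatever wait queue was set for it, and wakeup_thread() now takes the thread itself, removing it from that queue before signalling its condition variable. A condensed sketch of the intended calling pattern, assuming the caller already holds the corelock it passes in; this is simplified, not lifted verbatim from the tree:

    /* Sleeping side: the current thread parks itself; 'cl' protects the
     * object and must already be locked by the caller. */
    static void object_wait(struct thread_entry *self, struct corelock *cl)
    {
        block_thread_switch(self, cl);  /* registers on its wait queue (if set),
                                           then waits on self->cond until woken */
    }

    /* Waking side: another thread, holding the same corelock. */
    static void object_signal(struct thread_entry *waiter)
    {
        wakeup_thread(waiter);          /* dequeues the waiter, marks it
                                           runnable and signals its condvar */
    }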
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
index 0ba7d7e00b..927e55274c 100644
--- a/firmware/kernel/queue.c
+++ b/firmware/kernel/queue.c
@@ -51,7 +51,7 @@ static struct
51 | * q->events[]: | XX | E1 | E2 | E3 | E4 | XX | | 51 | * q->events[]: | XX | E1 | E2 | E3 | E4 | XX | |
52 | * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL | | 52 | * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL | |
53 | * \/ \/ \/ | 53 | * \/ \/ \/ |
54 | * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-< | 54 | * q->send->list: 0<-|T0|<->|T1|<->|T2|<-------->|T3|->0 |
55 | * q->send->curr_sender: /\ | 55 | * q->send->curr_sender: /\ |
56 | * | 56 | * |
57 | * Thread has E0 in its own struct queue_event. | 57 | * Thread has E0 in its own struct queue_event. |
@@ -65,20 +65,20 @@ static struct | |||
65 | * more efficent to reject the majority of cases that don't need this | 65 | * more efficent to reject the majority of cases that don't need this |
66 | * called. | 66 | * called. |
67 | */ | 67 | */ |
68 | static void queue_release_sender(struct thread_entry * volatile * sender, | 68 | static void queue_release_sender_inner( |
69 | intptr_t retval) | 69 | struct thread_entry * volatile * sender, intptr_t retval) |
70 | { | 70 | { |
71 | struct thread_entry *thread = *sender; | 71 | struct thread_entry *thread = *sender; |
72 | |||
73 | *sender = NULL; /* Clear slot. */ | 72 | *sender = NULL; /* Clear slot. */ |
74 | #ifdef HAVE_WAKEUP_EXT_CB | ||
75 | thread->wakeup_ext_cb = NULL; /* Clear callback. */ | ||
76 | #endif | ||
77 | thread->retval = retval; /* Assign thread-local return value. */ | 73 | thread->retval = retval; /* Assign thread-local return value. */ |
78 | *thread->bqp = thread; /* Move blocking queue head to thread since | 74 | wakeup_thread(thread, WAKEUP_RELEASE); |
79 | wakeup_thread wakes the first thread in | 75 | } |
80 | the list. */ | 76 | |
81 | wakeup_thread(thread->bqp, WAKEUP_RELEASE); | 77 | static inline void queue_release_sender( |
78 | struct thread_entry * volatile * sender, intptr_t retval) | ||
79 | { | ||
80 | if(UNLIKELY(*sender)) | ||
81 | queue_release_sender_inner(sender, retval); | ||
82 | } | 82 | } |
83 | 83 | ||
84 | /* Releases any waiting threads that are queued with queue_send - | 84 | /* Releases any waiting threads that are queued with queue_send - |
@@ -93,26 +93,11 @@ static void queue_release_all_senders(struct event_queue *q) | |||
93 | { | 93 | { |
94 | struct thread_entry **spp = | 94 | struct thread_entry **spp = |
95 | &q->send->senders[i & QUEUE_LENGTH_MASK]; | 95 | &q->send->senders[i & QUEUE_LENGTH_MASK]; |
96 | 96 | queue_release_sender(spp, 0); | |
97 | if(*spp) | ||
98 | { | ||
99 | queue_release_sender(spp, 0); | ||
100 | } | ||
101 | } | 97 | } |
102 | } | 98 | } |
103 | } | 99 | } |
104 | 100 | ||
105 | #ifdef HAVE_WAKEUP_EXT_CB | ||
106 | /* Callback to do extra forced removal steps from sender list in addition | ||
107 | * to the normal blocking queue removal and priority dis-inherit */ | ||
108 | static void queue_remove_sender_thread_cb(struct thread_entry *thread) | ||
109 | { | ||
110 | *((struct thread_entry **)thread->retval) = NULL; | ||
111 | thread->wakeup_ext_cb = NULL; | ||
112 | thread->retval = 0; | ||
113 | } | ||
114 | #endif /* HAVE_WAKEUP_EXT_CB */ | ||
115 | |||
116 | /* Enables queue_send on the specified queue - caller allocates the extra | 101 | /* Enables queue_send on the specified queue - caller allocates the extra |
117 | * data structure. Only queues which are taken to be owned by a thread should | 102 | * data structure. Only queues which are taken to be owned by a thread should |
118 | * enable this however an official owner is not compulsory but must be | 103 | * enable this however an official owner is not compulsory but must be |
@@ -132,11 +117,12 @@ void queue_enable_queue_send(struct event_queue *q, | |||
132 | if(send != NULL && q->send == NULL) | 117 | if(send != NULL && q->send == NULL) |
133 | { | 118 | { |
134 | memset(send, 0, sizeof(*send)); | 119 | memset(send, 0, sizeof(*send)); |
120 | wait_queue_init(&send->list); | ||
135 | #ifdef HAVE_PRIORITY_SCHEDULING | 121 | #ifdef HAVE_PRIORITY_SCHEDULING |
136 | send->blocker.priority = PRIORITY_IDLE; | 122 | blocker_init(&send->blocker); |
137 | if(owner_id != 0) | 123 | if(owner_id != 0) |
138 | { | 124 | { |
139 | send->blocker.thread = thread_id_entry(owner_id); | 125 | send->blocker.thread = __thread_id_entry(owner_id); |
140 | q->blocker_p = &send->blocker; | 126 | q->blocker_p = &send->blocker; |
141 | } | 127 | } |
142 | #endif | 128 | #endif |
@@ -154,24 +140,14 @@ static inline void queue_do_unblock_sender(struct queue_sender_list *send, | |||
154 | unsigned int i) | 140 | unsigned int i) |
155 | { | 141 | { |
156 | if(send) | 142 | if(send) |
157 | { | 143 | queue_release_sender(&send->senders[i], 0); |
158 | struct thread_entry **spp = &send->senders[i]; | ||
159 | |||
160 | if(UNLIKELY(*spp)) | ||
161 | { | ||
162 | queue_release_sender(spp, 0); | ||
163 | } | ||
164 | } | ||
165 | } | 144 | } |
166 | 145 | ||
167 | /* Perform the auto-reply sequence */ | 146 | /* Perform the auto-reply sequence */ |
168 | static inline void queue_do_auto_reply(struct queue_sender_list *send) | 147 | static inline void queue_do_auto_reply(struct queue_sender_list *send) |
169 | { | 148 | { |
170 | if(send && send->curr_sender) | 149 | if(send) |
171 | { | ||
172 | /* auto-reply */ | ||
173 | queue_release_sender(&send->curr_sender, 0); | 150 | queue_release_sender(&send->curr_sender, 0); |
174 | } | ||
175 | } | 151 | } |
176 | 152 | ||
177 | /* Moves waiting thread's refrence from the senders array to the | 153 | /* Moves waiting thread's refrence from the senders array to the |
@@ -191,7 +167,6 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send, | |||
191 | /* Move thread reference from array to the next thread | 167 | /* Move thread reference from array to the next thread |
192 | that queue_reply will release */ | 168 | that queue_reply will release */ |
193 | send->curr_sender = *spp; | 169 | send->curr_sender = *spp; |
194 | (*spp)->retval = (intptr_t)spp; | ||
195 | *spp = NULL; | 170 | *spp = NULL; |
196 | } | 171 | } |
197 | /* else message was posted asynchronously with queue_post */ | 172 | /* else message was posted asynchronously with queue_post */ |
@@ -205,18 +180,28 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send, | |||
205 | #define queue_do_fetch_sender(send, rd) | 180 | #define queue_do_fetch_sender(send, rd) |
206 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ | 181 | #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ |
207 | 182 | ||
183 | static void queue_wake_waiter_inner(struct thread_entry *thread) | ||
184 | { | ||
185 | wakeup_thread(thread, WAKEUP_DEFAULT); | ||
186 | } | ||
187 | |||
188 | static inline void queue_wake_waiter(struct event_queue *q) | ||
189 | { | ||
190 | struct thread_entry *thread = WQ_THREAD_FIRST(&q->queue); | ||
191 | if(thread != NULL) | ||
192 | queue_wake_waiter_inner(thread); | ||
193 | } | ||
194 | |||
208 | /* Queue must not be available for use during this call */ | 195 | /* Queue must not be available for use during this call */ |
209 | void queue_init(struct event_queue *q, bool register_queue) | 196 | void queue_init(struct event_queue *q, bool register_queue) |
210 | { | 197 | { |
211 | int oldlevel = disable_irq_save(); | 198 | int oldlevel = disable_irq_save(); |
212 | 199 | ||
213 | if(register_queue) | 200 | if(register_queue) |
214 | { | ||
215 | corelock_lock(&all_queues.cl); | 201 | corelock_lock(&all_queues.cl); |
216 | } | ||
217 | 202 | ||
218 | corelock_init(&q->cl); | 203 | corelock_init(&q->cl); |
219 | q->queue = NULL; | 204 | wait_queue_init(&q->queue); |
220 | /* What garbage is in write is irrelevant because of the masking design- | 205 | /* What garbage is in write is irrelevant because of the masking design- |
221 | * any other functions the empty the queue do this as well so that | 206 | * any other functions the empty the queue do this as well so that |
222 | * queue_count and queue_empty return sane values in the case of a | 207 | * queue_count and queue_empty return sane values in the case of a |
@@ -261,7 +246,7 @@ void queue_delete(struct event_queue *q) | |||
261 | corelock_unlock(&all_queues.cl); | 246 | corelock_unlock(&all_queues.cl); |
262 | 247 | ||
263 | /* Release thread(s) waiting on queue head */ | 248 | /* Release thread(s) waiting on queue head */ |
264 | thread_queue_wake(&q->queue); | 249 | wait_queue_wake(&q->queue); |
265 | 250 | ||
266 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 251 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
267 | if(q->send) | 252 | if(q->send) |
@@ -293,7 +278,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
293 | 278 | ||
294 | #ifdef HAVE_PRIORITY_SCHEDULING | 279 | #ifdef HAVE_PRIORITY_SCHEDULING |
295 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 280 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
296 | QUEUE_GET_THREAD(q) == thread_self_entry(), | 281 | QUEUE_GET_THREAD(q) == __running_self_entry(), |
297 | "queue_wait->wrong thread\n"); | 282 | "queue_wait->wrong thread\n"); |
298 | #endif | 283 | #endif |
299 | 284 | ||
@@ -307,18 +292,12 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
307 | 292 | ||
308 | while(1) | 293 | while(1) |
309 | { | 294 | { |
310 | struct thread_entry *current; | ||
311 | |||
312 | rd = q->read; | 295 | rd = q->read; |
313 | if (rd != q->write) /* A waking message could disappear */ | 296 | if (rd != q->write) /* A waking message could disappear */ |
314 | break; | 297 | break; |
315 | 298 | ||
316 | current = thread_self_entry(); | 299 | struct thread_entry *current = __running_self_entry(); |
317 | 300 | block_thread(current, TIMEOUT_BLOCK, &q->queue, NULL); | |
318 | IF_COP( current->obj_cl = &q->cl; ) | ||
319 | current->bqp = &q->queue; | ||
320 | |||
321 | block_thread(current, TIMEOUT_BLOCK); | ||
322 | 301 | ||
323 | corelock_unlock(&q->cl); | 302 | corelock_unlock(&q->cl); |
324 | switch_thread(); | 303 | switch_thread(); |
@@ -349,16 +328,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
349 | int oldlevel; | 328 | int oldlevel; |
350 | unsigned int rd, wr; | 329 | unsigned int rd, wr; |
351 | 330 | ||
352 | /* this function works only with a positive number (or zero) of ticks */ | ||
353 | if (ticks == TIMEOUT_BLOCK) | ||
354 | { | ||
355 | queue_wait(q, ev); | ||
356 | return; | ||
357 | } | ||
358 | |||
359 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 331 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
360 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || | 332 | KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || |
361 | QUEUE_GET_THREAD(q) == thread_self_entry(), | 333 | QUEUE_GET_THREAD(q) == __running_self_entry(), |
362 | "queue_wait_w_tmo->wrong thread\n"); | 334 | "queue_wait_w_tmo->wrong thread\n"); |
363 | #endif | 335 | #endif |
364 | 336 | ||
@@ -372,14 +344,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
372 | 344 | ||
373 | rd = q->read; | 345 | rd = q->read; |
374 | wr = q->write; | 346 | wr = q->write; |
375 | if (rd == wr && ticks > 0) | 347 | if (rd == wr && ticks != 0) |
376 | { | 348 | { |
377 | struct thread_entry *current = thread_self_entry(); | 349 | struct thread_entry *current = __running_self_entry(); |
378 | 350 | block_thread(current, ticks, &q->queue, NULL); | |
379 | IF_COP( current->obj_cl = &q->cl; ) | ||
380 | current->bqp = &q->queue; | ||
381 | |||
382 | block_thread(current, ticks); | ||
383 | corelock_unlock(&q->cl); | 351 | corelock_unlock(&q->cl); |
384 | 352 | ||
385 | switch_thread(); | 353 | switch_thread(); |
@@ -389,6 +357,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
389 | 357 | ||
390 | rd = q->read; | 358 | rd = q->read; |
391 | wr = q->write; | 359 | wr = q->write; |
360 | |||
361 | wait_queue_try_remove(current); | ||
392 | } | 362 | } |
393 | 363 | ||
394 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 364 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
@@ -436,7 +406,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data) | |||
436 | queue_do_unblock_sender(q->send, wr); | 406 | queue_do_unblock_sender(q->send, wr); |
437 | 407 | ||
438 | /* Wakeup a waiting thread if any */ | 408 | /* Wakeup a waiting thread if any */ |
439 | wakeup_thread(&q->queue, WAKEUP_DEFAULT); | 409 | queue_wake_waiter(q); |
440 | 410 | ||
441 | corelock_unlock(&q->cl); | 411 | corelock_unlock(&q->cl); |
442 | restore_irq(oldlevel); | 412 | restore_irq(oldlevel); |
@@ -465,28 +435,17 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
465 | { | 435 | { |
466 | struct queue_sender_list *send = q->send; | 436 | struct queue_sender_list *send = q->send; |
467 | struct thread_entry **spp = &send->senders[wr]; | 437 | struct thread_entry **spp = &send->senders[wr]; |
468 | struct thread_entry *current = thread_self_entry(); | 438 | struct thread_entry *current = __running_self_entry(); |
469 | 439 | ||
470 | if(UNLIKELY(*spp)) | 440 | /* overflow protect - unblock any thread waiting at this index */ |
471 | { | 441 | queue_release_sender(spp, 0); |
472 | /* overflow protect - unblock any thread waiting at this index */ | ||
473 | queue_release_sender(spp, 0); | ||
474 | } | ||
475 | 442 | ||
476 | /* Wakeup a waiting thread if any */ | 443 | /* Wakeup a waiting thread if any */ |
477 | wakeup_thread(&q->queue, WAKEUP_DEFAULT); | 444 | queue_wake_waiter(q); |
478 | 445 | ||
479 | /* Save thread in slot, add to list and wait for reply */ | 446 | /* Save thread in slot, add to list and wait for reply */ |
480 | *spp = current; | 447 | *spp = current; |
481 | IF_COP( current->obj_cl = &q->cl; ) | 448 | block_thread(current, TIMEOUT_BLOCK, &send->list, q->blocker_p); |
482 | IF_PRIO( current->blocker = q->blocker_p; ) | ||
483 | #ifdef HAVE_WAKEUP_EXT_CB | ||
484 | current->wakeup_ext_cb = queue_remove_sender_thread_cb; | ||
485 | #endif | ||
486 | current->retval = (intptr_t)spp; | ||
487 | current->bqp = &send->list; | ||
488 | |||
489 | block_thread(current, TIMEOUT_BLOCK); | ||
490 | 449 | ||
491 | corelock_unlock(&q->cl); | 450 | corelock_unlock(&q->cl); |
492 | switch_thread(); | 451 | switch_thread(); |
@@ -495,7 +454,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
495 | } | 454 | } |
496 | 455 | ||
497 | /* Function as queue_post if sending is not enabled */ | 456 | /* Function as queue_post if sending is not enabled */ |
498 | wakeup_thread(&q->queue, WAKEUP_DEFAULT); | 457 | queue_wake_waiter(q); |
499 | 458 | ||
500 | corelock_unlock(&q->cl); | 459 | corelock_unlock(&q->cl); |
501 | restore_irq(oldlevel); | 460 | restore_irq(oldlevel); |
@@ -530,16 +489,12 @@ void queue_reply(struct event_queue *q, intptr_t retval) | |||
530 | { | 489 | { |
531 | if(q->send && q->send->curr_sender) | 490 | if(q->send && q->send->curr_sender) |
532 | { | 491 | { |
533 | struct queue_sender_list *sender; | ||
534 | |||
535 | int oldlevel = disable_irq_save(); | 492 | int oldlevel = disable_irq_save(); |
536 | corelock_lock(&q->cl); | 493 | corelock_lock(&q->cl); |
537 | 494 | ||
538 | sender = q->send; | 495 | struct queue_sender_list *send = q->send; |
539 | 496 | if(send) | |
540 | /* Double-check locking */ | 497 | queue_release_sender(&send->curr_sender, retval); |
541 | if(LIKELY(sender && sender->curr_sender)) | ||
542 | queue_release_sender(&sender->curr_sender, retval); | ||
543 | 498 | ||
544 | corelock_unlock(&q->cl); | 499 | corelock_unlock(&q->cl); |
545 | restore_irq(oldlevel); | 500 | restore_irq(oldlevel); |
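None of this changes how event queues are used; the rework only moves the waiter bookkeeping onto struct __wait_queue and funnels sender wakeups through queue_release_sender(). For orientation, the usual consumer/producer pattern these calls serve; the queue name and handling are illustrative:

    #include "kernel.h"   /* event queue API, HZ, SYS_TIMEOUT */

    static struct event_queue button_queue;

    void consumer_thread(void)
    {
        struct queue_event ev;
        queue_init(&button_queue, true);          /* register in all_queues */

        while(1)
        {
            queue_wait_w_tmo(&button_queue, &ev, HZ);  /* block up to 1 second */
            switch(ev.id)
            {
            case SYS_TIMEOUT:
                break;                            /* nothing arrived in time */
            default:
                /* handle ev.id / ev.data posted by another thread */
                break;
            }
        }
    }

    void producer(long id, intptr_t data)
    {
        queue_post(&button_queue, id, data);      /* wakes the first waiter, if any */
    }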
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c
index 1505038fbc..5e9e46798f 100644
--- a/firmware/kernel/semaphore.c
+++ b/firmware/kernel/semaphore.c
@@ -24,6 +24,7 @@
24 | /**************************************************************************** | 24 | /**************************************************************************** |
25 | * Simple semaphore functions ;) | 25 | * Simple semaphore functions ;) |
26 | ****************************************************************************/ | 26 | ****************************************************************************/ |
27 | |||
27 | /* Initialize the semaphore object. | 28 | /* Initialize the semaphore object. |
28 | * max = maximum up count the semaphore may assume (max >= 1) | 29 | * max = maximum up count the semaphore may assume (max >= 1) |
29 | * start = initial count of semaphore (0 <= count <= max) */ | 30 | * start = initial count of semaphore (0 <= count <= max) */ |
@@ -31,7 +32,7 @@ void semaphore_init(struct semaphore *s, int max, int start) | |||
31 | { | 32 | { |
32 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, | 33 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, |
33 | "semaphore_init->inv arg\n"); | 34 | "semaphore_init->inv arg\n"); |
34 | s->queue = NULL; | 35 | wait_queue_init(&s->queue); |
35 | s->max = max; | 36 | s->max = max; |
36 | s->count = start; | 37 | s->count = start; |
37 | corelock_init(&s->cl); | 38 | corelock_init(&s->cl); |
@@ -42,44 +43,49 @@ void semaphore_init(struct semaphore *s, int max, int start) | |||
42 | * safely be used in an ISR. */ | 43 | * safely be used in an ISR. */ |
43 | int semaphore_wait(struct semaphore *s, int timeout) | 44 | int semaphore_wait(struct semaphore *s, int timeout) |
44 | { | 45 | { |
45 | int ret; | 46 | int ret = OBJ_WAIT_TIMEDOUT; |
46 | int oldlevel; | ||
47 | int count; | ||
48 | 47 | ||
49 | oldlevel = disable_irq_save(); | 48 | int oldlevel = disable_irq_save(); |
50 | corelock_lock(&s->cl); | 49 | corelock_lock(&s->cl); |
51 | 50 | ||
52 | count = s->count; | 51 | int count = s->count; |
53 | |||
54 | if(LIKELY(count > 0)) | 52 | if(LIKELY(count > 0)) |
55 | { | 53 | { |
56 | /* count is not zero; down it */ | 54 | /* count is not zero; down it */ |
57 | s->count = count - 1; | 55 | s->count = count - 1; |
58 | ret = OBJ_WAIT_SUCCEEDED; | 56 | ret = OBJ_WAIT_SUCCEEDED; |
59 | } | 57 | } |
60 | else if(timeout == 0) | 58 | else if(timeout != 0) |
61 | { | ||
62 | /* just polling it */ | ||
63 | ret = OBJ_WAIT_TIMEDOUT; | ||
64 | } | ||
65 | else | ||
66 | { | 59 | { |
67 | /* too many waits - block until count is upped... */ | 60 | /* too many waits - block until count is upped... */ |
68 | struct thread_entry * current = thread_self_entry(); | 61 | struct thread_entry *current = __running_self_entry(); |
69 | IF_COP( current->obj_cl = &s->cl; ) | 62 | |
70 | current->bqp = &s->queue; | 63 | block_thread(current, timeout, &s->queue, NULL); |
71 | /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was | ||
72 | * explicit in semaphore_release */ | ||
73 | current->retval = OBJ_WAIT_TIMEDOUT; | ||
74 | |||
75 | block_thread(current, timeout); | ||
76 | corelock_unlock(&s->cl); | 64 | corelock_unlock(&s->cl); |
77 | 65 | ||
78 | /* ...and turn control over to next thread */ | 66 | /* ...and turn control over to next thread */ |
79 | switch_thread(); | 67 | switch_thread(); |
80 | 68 | ||
81 | return current->retval; | 69 | /* if explicit wake indicated; do no more */ |
70 | if(LIKELY(!wait_queue_ptr(current))) | ||
71 | return OBJ_WAIT_SUCCEEDED; | ||
72 | |||
73 | disable_irq(); | ||
74 | corelock_lock(&s->cl); | ||
75 | |||
76 | /* see if anyone got us after the expired wait */ | ||
77 | if(wait_queue_try_remove(current)) | ||
78 | { | ||
79 | count = s->count; | ||
80 | if(count > 0) | ||
81 | { | ||
82 | /* down it lately */ | ||
83 | s->count = count - 1; | ||
84 | ret = OBJ_WAIT_SUCCEEDED; | ||
85 | } | ||
86 | } | ||
82 | } | 87 | } |
88 | /* else just polling it */ | ||
83 | 89 | ||
84 | corelock_unlock(&s->cl); | 90 | corelock_unlock(&s->cl); |
85 | restore_irq(oldlevel); | 91 | restore_irq(oldlevel); |
@@ -93,18 +99,17 @@ int semaphore_wait(struct semaphore *s, int timeout) | |||
93 | void semaphore_release(struct semaphore *s) | 99 | void semaphore_release(struct semaphore *s) |
94 | { | 100 | { |
95 | unsigned int result = THREAD_NONE; | 101 | unsigned int result = THREAD_NONE; |
96 | int oldlevel; | ||
97 | 102 | ||
98 | oldlevel = disable_irq_save(); | 103 | int oldlevel = disable_irq_save(); |
99 | corelock_lock(&s->cl); | 104 | corelock_lock(&s->cl); |
100 | 105 | ||
101 | if(LIKELY(s->queue != NULL)) | 106 | struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue); |
107 | if(LIKELY(thread != NULL)) | ||
102 | { | 108 | { |
103 | /* a thread was queued - wake it up and keep count at 0 */ | 109 | /* a thread was queued - wake it up and keep count at 0 */ |
104 | KERNEL_ASSERT(s->count == 0, | 110 | KERNEL_ASSERT(s->count == 0, |
105 | "semaphore_release->threads queued but count=%d!\n", s->count); | 111 | "semaphore_release->threads queued but count=%d!\n", s->count); |
106 | s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ | 112 | result = wakeup_thread(thread, WAKEUP_DEFAULT); |
107 | result = wakeup_thread(&s->queue, WAKEUP_DEFAULT); | ||
108 | } | 113 | } |
109 | else | 114 | else |
110 | { | 115 | { |
diff --git a/firmware/kernel/thread-common.c b/firmware/kernel/thread-common.c index b8b8ffbd4c..aad6610feb 100644 --- a/firmware/kernel/thread-common.c +++ b/firmware/kernel/thread-common.c | |||
@@ -18,39 +18,222 @@ | |||
18 | * KIND, either express or implied. | 18 | * KIND, either express or implied. |
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | #include "thread-internal.h" | 21 | #include "kernel-internal.h" |
22 | #include "system.h" | 22 | #include "system.h" |
23 | 23 | ||
24 | /* Unless otherwise defined, do nothing */ | ||
25 | #ifndef YIELD_KERNEL_HOOK | ||
26 | #define YIELD_KERNEL_HOOK() false | ||
27 | #endif | ||
28 | #ifndef SLEEP_KERNEL_HOOK | ||
29 | #define SLEEP_KERNEL_HOOK(ticks) false | ||
30 | #endif | ||
31 | |||
32 | const char __main_thread_name_str[] = "main"; | ||
33 | |||
34 | /* Array indexing is more efficient in inlines if the elements are a native | ||
35 | word size (100s of bytes fewer instructions) */ | ||
36 | |||
37 | #if NUM_CORES > 1 | ||
38 | static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR; | ||
39 | struct core_entry *__cores[NUM_CORES] IBSS_ATTR; | ||
40 | #else | ||
41 | struct core_entry __cores[NUM_CORES] IBSS_ATTR; | ||
42 | #endif | ||
43 | |||
44 | static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR; | ||
45 | struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR; | ||
46 | |||
47 | |||
48 | /** Internal functions **/ | ||
49 | |||
24 | /*--------------------------------------------------------------------------- | 50 | /*--------------------------------------------------------------------------- |
25 | * Wakeup an entire queue of threads - returns bitwise-or of return bitmask | 51 | * Find an empty thread slot or NULL if none found. The slot returned will |
26 | * from each operation or THREAD_NONE if nothing was awakened. Object owning | 52 | * be locked on multicore. |
27 | * the queue must be locked first. | ||
28 | * | ||
29 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
30 | *--------------------------------------------------------------------------- | 53 | *--------------------------------------------------------------------------- |
31 | */ | 54 | */ |
32 | unsigned int thread_queue_wake(struct thread_entry **list) | 55 | static struct threadalloc |
33 | { | 56 | { |
34 | unsigned result = THREAD_NONE; | 57 | threadbit_t avail; |
58 | #if NUM_CORES > 1 | ||
59 | struct corelock cl; | ||
60 | #endif | ||
61 | } threadalloc SHAREDBSS_ATTR; | ||
62 | |||
63 | /*--------------------------------------------------------------------------- | ||
64 | * Initialize the thread allocator | ||
65 | *--------------------------------------------------------------------------- | ||
66 | */ | ||
67 | void thread_alloc_init(void) | ||
68 | { | ||
69 | corelock_init(&threadalloc.cl); | ||
35 | 70 | ||
36 | for (;;) | 71 | for (unsigned int core = 0; core < NUM_CORES; core++) |
37 | { | 72 | { |
38 | unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); | 73 | #if NUM_CORES > 1 |
74 | struct core_entry *c = &__core_entries[core]; | ||
75 | __cores[core] = c; | ||
76 | #else | ||
77 | struct core_entry *c = &__cores[core]; | ||
78 | #endif | ||
79 | rtr_queue_init(&c->rtr); | ||
80 | corelock_init(&c->rtr_cl); | ||
81 | tmo_queue_init(&c->tmo); | ||
82 | c->next_tmo_check = current_tick; /* Something not in the past */ | ||
83 | } | ||
39 | 84 | ||
40 | if (rc == THREAD_NONE) | 85 | for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++) |
41 | break; /* No more threads */ | 86 | { |
87 | struct thread_entry *t = &__thread_entries[slotnum]; | ||
88 | __threads[slotnum] = t; | ||
89 | corelock_init(&t->waiter_cl); | ||
90 | corelock_init(&t->slot_cl); | ||
91 | t->id = THREAD_ID_INIT(slotnum); | ||
92 | threadbit_set_bit(&threadalloc.avail, slotnum); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /*--------------------------------------------------------------------------- | ||
97 | * Allocate a thread slot | ||
98 | *--------------------------------------------------------------------------- | ||
99 | */ | ||
100 | struct thread_entry * thread_alloc(void) | ||
101 | { | ||
102 | struct thread_entry *thread = NULL; | ||
42 | 103 | ||
43 | result |= rc; | 104 | corelock_lock(&threadalloc.cl); |
105 | |||
106 | unsigned int slotnum = threadbit_ffs(&threadalloc.avail); | ||
107 | if (slotnum < MAXTHREADS) | ||
108 | { | ||
109 | threadbit_clear_bit(&threadalloc.avail, slotnum); | ||
110 | thread = __threads[slotnum]; | ||
44 | } | 111 | } |
45 | 112 | ||
113 | corelock_unlock(&threadalloc.cl); | ||
114 | |||
115 | return thread; | ||
116 | } | ||
117 | |||
118 | /*--------------------------------------------------------------------------- | ||
119 | * Free the thread slot of 'thread' | ||
120 | *--------------------------------------------------------------------------- | ||
121 | */ | ||
122 | void thread_free(struct thread_entry *thread) | ||
123 | { | ||
124 | corelock_lock(&threadalloc.cl); | ||
125 | threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id)); | ||
126 | corelock_unlock(&threadalloc.cl); | ||
127 | } | ||
128 | |||
129 | /*--------------------------------------------------------------------------- | ||
130 | * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. | ||
131 | *--------------------------------------------------------------------------- | ||
132 | */ | ||
133 | void new_thread_id(struct thread_entry *thread) | ||
134 | { | ||
135 | uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT); | ||
136 | |||
137 | /* If wrapped to 0, make it 1 */ | ||
138 | if ((id & THREAD_ID_VERSION_MASK) == 0) | ||
139 | id |= (1u << THREAD_ID_VERSION_SHIFT); | ||
140 | |||
141 | thread->id = id; | ||
142 | } | ||
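Taken together, the allocator and the ID versioning above give the slot lifecycle that thread creation and removal are expected to follow. A sketch, not code from this commit; example_spawn()/example_reap() are illustrative names only.

    /* Sketch of a slot's lifetime using the helpers defined above. */
    static unsigned int example_spawn(void)
    {
        struct thread_entry *t = thread_alloc();
        if (t == NULL)
            return 0;              /* all MAXTHREADS slots are in use */

        /* ...set up stack, name, priority and context, then queue it... */
        return t->id;              /* handle returned to the caller */
    }

    static void example_reap(struct thread_entry *t)
    {
        new_thread_id(t);          /* stale ids no longer match this slot */
        thread_free(t);            /* slot returns to threadalloc.avail */
    }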
143 | |||
144 | /*--------------------------------------------------------------------------- | ||
145 | * Wakeup an entire queue of threads - returns bitwise-or of return bitmask | ||
146 | * from each operation or THREAD_NONE if nothing was awakened. | ||
147 | *--------------------------------------------------------------------------- | ||
148 | */ | ||
149 | unsigned int wait_queue_wake(struct __wait_queue *wqp) | ||
150 | { | ||
151 | unsigned result = THREAD_NONE; | ||
152 | struct thread_entry *thread; | ||
153 | |||
154 | while ((thread = WQ_THREAD_FIRST(wqp))) | ||
155 | result |= wakeup_thread(thread, WAKEUP_DEFAULT); | ||
156 | |||
46 | return result; | 157 | return result; |
47 | } | 158 | } |
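wait_queue_wake() is the replacement for the old thread_queue_wake(): a kernel object now embeds a struct __wait_queue instead of a bare thread pointer and broadcasts the same way. A sketch with a hypothetical object; the locking rule (object lock held, IRQs masked) is unchanged.

    /* Hypothetical object for illustration; mutex/queue/semaphore follow the
     * same pattern with their own corelocks. */
    struct my_object
    {
        struct __wait_queue queue;           /* waiter list */
        IF_COP( struct corelock cl; )
    };

    /* Called with IRQs disabled and obj->cl held. Returns the OR of the
     * THREAD_* wakeup flags, THREAD_NONE if the queue was empty. */
    static unsigned int my_object_broadcast(struct my_object *obj)
    {
        return wait_queue_wake(&obj->queue);
    }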
48 | 159 | ||
49 | 160 | ||
161 | /** Public functions **/ | ||
162 | |||
163 | #ifdef RB_PROFILE | ||
164 | void profile_thread(void) | ||
165 | { | ||
166 | profstart(THREAD_ID_SLOT(__running_self_entry()->id)); | ||
167 | } | ||
168 | #endif | ||
169 | |||
170 | /*--------------------------------------------------------------------------- | ||
171 | * Return the thread id of the calling thread | ||
172 | * -------------------------------------------------------------------------- | ||
173 | */ | ||
174 | unsigned int thread_self(void) | ||
175 | { | ||
176 | return __running_self_entry()->id; | ||
177 | } | ||
178 | |||
179 | /*--------------------------------------------------------------------------- | ||
180 | * Suspends a thread's execution for at least the specified number of ticks. | ||
181 | * | ||
182 | * May result in CPU core entering wait-for-interrupt mode if no other thread | ||
183 | * may be scheduled. | ||
184 | * | ||
185 | * NOTE: sleep(0) sleeps until the end of the current tick | ||
186 | * sleep(n) that doesn't result in rescheduling: | ||
187 | * n <= ticks suspended < n + 1 | ||
188 | * n to n+1 is a lower bound. Other factors may affect the actual time | ||
189 | * a thread is suspended before it runs again. | ||
190 | *--------------------------------------------------------------------------- | ||
191 | */ | ||
192 | unsigned sleep(unsigned ticks) | ||
193 | { | ||
194 | /* In certain situations, certain bootloaders in particular, a normal | ||
195 | * threading call is inappropriate. */ | ||
196 | if (SLEEP_KERNEL_HOOK(ticks)) | ||
197 | return 0; /* Handled */ | ||
198 | |||
199 | disable_irq(); | ||
200 | sleep_thread(ticks); | ||
201 | switch_thread(); | ||
202 | return 0; | ||
203 | } | ||
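Because the bound above is only "at least n ticks", callers that think in milliseconds round up when converting. A small sketch; HZ is the kernel tick rate assumed from the config headers.

    /* Suspend the caller for at least 'ms' milliseconds (sketch). */
    static void sleep_at_least_ms(unsigned int ms)
    {
        sleep((ms * HZ + 999) / 1000);       /* round up to whole ticks */
    }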
204 | |||
205 | /*--------------------------------------------------------------------------- | ||
206 | * Elects another thread to run or, if no other thread may be made ready to | ||
207 | * run, immediately returns control back to the calling thread. | ||
208 | *--------------------------------------------------------------------------- | ||
209 | */ | ||
210 | void yield(void) | ||
211 | { | ||
212 | /* In certain situations, certain bootloaders in particular, a normal | ||
213 | * threading call is inappropriate. */ | ||
214 | if (YIELD_KERNEL_HOOK()) | ||
215 | return; /* Handled */ | ||
216 | |||
217 | switch_thread(); | ||
218 | } | ||
219 | |||
220 | |||
50 | /** Debug screen stuff **/ | 221 | /** Debug screen stuff **/ |
51 | 222 | ||
223 | void format_thread_name(char *buf, size_t bufsize, | ||
224 | const struct thread_entry *thread) | ||
225 | { | ||
226 | const char *name = thread->name; | ||
227 | if (!name) | ||
228 | name = ""; | ||
229 | |||
230 | const char *fmt = *name ? "%s" : "%s%08lX"; | ||
231 | snprintf(buf, bufsize, fmt, name, thread->id); | ||
232 | } | ||
233 | |||
234 | #ifndef HAVE_SDL_THREADS | ||
52 | /*--------------------------------------------------------------------------- | 235 | /*--------------------------------------------------------------------------- |
53 | * returns the stack space used in bytes | 236 | * Returns the maximum percentage of the stack ever used during runtime. |
54 | *--------------------------------------------------------------------------- | 237 | *--------------------------------------------------------------------------- |
55 | */ | 238 | */ |
56 | static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) | 239 | static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) |
@@ -69,13 +252,9 @@ static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) | |||
69 | 252 | ||
70 | return usage; | 253 | return usage; |
71 | } | 254 | } |
255 | #endif /* HAVE_SDL_THREADS */ | ||
72 | 256 | ||
73 | #if NUM_CORES > 1 | 257 | #if NUM_CORES > 1 |
74 | /*--------------------------------------------------------------------------- | ||
75 | * Returns the maximum percentage of the core's idle stack ever used during | ||
76 | * runtime. | ||
77 | *--------------------------------------------------------------------------- | ||
78 | */ | ||
79 | int core_get_debug_info(unsigned int core, struct core_debug_info *infop) | 258 | int core_get_debug_info(unsigned int core, struct core_debug_info *infop) |
80 | { | 259 | { |
81 | extern uintptr_t * const idle_stacks[NUM_CORES]; | 260 | extern uintptr_t * const idle_stacks[NUM_CORES]; |
@@ -105,29 +284,29 @@ int thread_get_debug_info(unsigned int thread_id, | |||
105 | if (!infop) | 284 | if (!infop) |
106 | return -1; | 285 | return -1; |
107 | 286 | ||
108 | unsigned int slot = THREAD_ID_SLOT(thread_id); | 287 | unsigned int slotnum = THREAD_ID_SLOT(thread_id); |
109 | if (slot >= MAXTHREADS) | 288 | if (slotnum >= MAXTHREADS) |
110 | return -1; | 289 | return -1; |
111 | 290 | ||
112 | extern struct thread_entry threads[MAXTHREADS]; | 291 | struct thread_entry *thread = __thread_slot_entry(slotnum); |
113 | struct thread_entry *thread = &threads[slot]; | ||
114 | 292 | ||
115 | int oldlevel = disable_irq_save(); | 293 | int oldlevel = disable_irq_save(); |
116 | LOCK_THREAD(thread); | 294 | corelock_lock(&threadalloc.cl); |
295 | corelock_lock(&thread->slot_cl); | ||
117 | 296 | ||
118 | unsigned int state = thread->state; | 297 | unsigned int state = thread->state; |
119 | 298 | ||
120 | if (state != STATE_KILLED) | 299 | int ret = 0; |
121 | { | ||
122 | const char *name = thread->name; | ||
123 | if (!name) | ||
124 | name = ""; | ||
125 | 300 | ||
301 | if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0) | ||
302 | { | ||
126 | bool cpu_boost = false; | 303 | bool cpu_boost = false; |
127 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 304 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
128 | cpu_boost = thread->cpu_boost; | 305 | cpu_boost = thread->cpu_boost; |
129 | #endif | 306 | #endif |
307 | #ifndef HAVE_SDL_THREADS | ||
130 | infop->stack_usage = stack_usage(thread->stack, thread->stack_size); | 308 | infop->stack_usage = stack_usage(thread->stack, thread->stack_size); |
309 | #endif | ||
131 | #if NUM_CORES > 1 | 310 | #if NUM_CORES > 1 |
132 | infop->core = thread->core; | 311 | infop->core = thread->core; |
133 | #endif | 312 | #endif |
@@ -140,13 +319,13 @@ int thread_get_debug_info(unsigned int thread_id, | |||
140 | cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '), | 319 | cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '), |
141 | status_chars[state]); | 320 | status_chars[state]); |
142 | 321 | ||
143 | const char *fmt = *name ? "%s" : "%s%08lX"; | 322 | format_thread_name(infop->name, sizeof (infop->name), thread); |
144 | snprintf(infop->name, sizeof (infop->name), fmt, name, | 323 | ret = 1; |
145 | thread->id); | ||
146 | } | 324 | } |
147 | 325 | ||
148 | UNLOCK_THREAD(thread); | 326 | corelock_unlock(&thread->slot_cl); |
327 | corelock_unlock(&threadalloc.cl); | ||
149 | restore_irq(oldlevel); | 328 | restore_irq(oldlevel); |
150 | 329 | ||
151 | return state == STATE_KILLED ? 0 : 1; | 330 | return ret; |
152 | } | 331 | } |
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h index 894bd1fe7c..10606a54a6 100644 --- a/firmware/kernel/thread-internal.h +++ b/firmware/kernel/thread-internal.h | |||
@@ -78,30 +78,11 @@ struct priority_distribution | |||
78 | 78 | ||
79 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 79 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
80 | 80 | ||
81 | #ifdef HAVE_CORELOCK_OBJECT | 81 | #define __rtr_queue lldc_head |
82 | /* Operations to be performed just before stopping a thread and starting | 82 | #define __rtr_queue_node lldc_node |
83 | a new one if specified before calling switch_thread */ | ||
84 | enum | ||
85 | { | ||
86 | TBOP_CLEAR = 0, /* No operation to do */ | ||
87 | TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */ | ||
88 | TBOP_SWITCH_CORE, /* Call the core switch preparation routine */ | ||
89 | }; | ||
90 | 83 | ||
91 | struct thread_blk_ops | 84 | #define __tmo_queue ll_head |
92 | { | 85 | #define __tmo_queue_node ll_node |
93 | struct corelock *cl_p; /* pointer to corelock */ | ||
94 | unsigned char flags; /* TBOP_* flags */ | ||
95 | }; | ||
96 | #endif /* NUM_CORES > 1 */ | ||
97 | |||
98 | /* Link information for lists thread is in */ | ||
99 | struct thread_entry; /* forward */ | ||
100 | struct thread_list | ||
101 | { | ||
102 | struct thread_entry *prev; /* Previous thread in a list */ | ||
103 | struct thread_entry *next; /* Next thread in a list */ | ||
104 | }; | ||
105 | 86 | ||
106 | /* Information kept in each thread slot | 87 | /* Information kept in each thread slot |
107 | * members are arranged according to size - largest first - in order | 88 | * members are arranged according to size - largest first - in order |
@@ -109,73 +90,64 @@ struct thread_list | |||
109 | */ | 90 | */ |
110 | struct thread_entry | 91 | struct thread_entry |
111 | { | 92 | { |
112 | struct regs context; /* Register context at switch - | 93 | struct regs context; /* Register context at switch - |
113 | _must_ be first member */ | 94 | _must_ be first member */ |
114 | uintptr_t *stack; /* Pointer to top of stack */ | 95 | #ifndef HAVE_SDL_THREADS |
115 | const char *name; /* Thread name */ | 96 | uintptr_t *stack; /* Pointer to top of stack */ |
116 | long tmo_tick; /* Tick when thread should be woken from | ||
117 | timeout - | ||
118 | states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */ | ||
119 | struct thread_list l; /* Links for blocked/waking/running - | ||
120 | circular linkage in both directions */ | ||
121 | struct thread_list tmo; /* Links for timeout list - | ||
122 | Circular in reverse direction, NULL-terminated in | ||
123 | forward direction - | ||
124 | states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */ | ||
125 | struct thread_entry **bqp; /* Pointer to list variable in kernel | ||
126 | object where thread is blocked - used | ||
127 | for implicit unblock and explicit wake | ||
128 | states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ | ||
129 | #ifdef HAVE_CORELOCK_OBJECT | ||
130 |     struct corelock *obj_cl;   /* Object corelock where thread is blocked - | ||
131 | states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ | ||
132 | struct corelock waiter_cl; /* Corelock for thread_wait */ | ||
133 | struct corelock slot_cl; /* Corelock to lock thread slot */ | ||
134 | unsigned char core; /* The core to which thread belongs */ | ||
135 | #endif | ||
136 | struct thread_entry *queue; /* List of threads waiting for thread to be | ||
137 | removed */ | ||
138 | #ifdef HAVE_WAKEUP_EXT_CB | ||
139 | void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that | ||
140 | performs special steps needed when being | ||
141 | forced off of an object's wait queue that | ||
142 | go beyond the standard wait queue removal | ||
143 | and priority disinheritance */ | ||
144 | /* Only enabled when using queue_send for now */ | ||
145 | #endif | 97 | #endif |
146 | #if defined(HAVE_SEMAPHORE_OBJECTS) || \ | 98 | const char *name; /* Thread name */ |
147 | defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \ | 99 | long tmo_tick; /* Tick when thread should be woken */ |
148 | NUM_CORES > 1 | 100 | struct __rtr_queue_node rtr; /* Node for run queue */ |
149 | volatile intptr_t retval; /* Return value from a blocked operation/ | 101 | struct __tmo_queue_node tmo; /* Links for timeout list */ |
150 | misc. use */ | 102 | struct __wait_queue_node wq; /* Node for wait queue */ |
103 | struct __wait_queue *volatile wqp; /* Pointer to registered wait queue */ | ||
104 | #if NUM_CORES > 1 | ||
105 | struct corelock waiter_cl; /* Corelock for thread_wait */ | ||
106 | struct corelock slot_cl; /* Corelock to lock thread slot */ | ||
107 | unsigned char core; /* The core to which thread belongs */ | ||
151 | #endif | 108 | #endif |
152 | uint32_t id; /* Current slot id */ | 109 | struct __wait_queue queue; /* List of threads waiting for thread to be |
153 | int __errno; /* Thread error number (errno tls) */ | 110 | removed */ |
111 | volatile intptr_t retval; /* Return value from a blocked operation/ | ||
112 | misc. use */ | ||
113 | uint32_t id; /* Current slot id */ | ||
114 | int __errno; /* Thread error number (errno tls) */ | ||
154 | #ifdef HAVE_PRIORITY_SCHEDULING | 115 | #ifdef HAVE_PRIORITY_SCHEDULING |
155 | /* Priority summary of owned objects that support inheritance */ | 116 | /* Priority summary of owned objects that support inheritance */ |
156 | struct blocker *blocker; /* Pointer to blocker when this thread is blocked | 117 | struct blocker *blocker; /* Pointer to blocker when this thread is blocked |
157 | on an object that supports PIP - | 118 | on an object that supports PIP - |
158 | states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ | 119 | states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */ |
159 | struct priority_distribution pdist; /* Priority summary of owned objects | 120 | struct priority_distribution pdist; /* Priority summary of owned objects |
160 | that have blocked threads and thread's own | 121 | that have blocked threads and thread's own |
161 | base priority */ | 122 | base priority */ |
162 | int skip_count; /* Number of times skipped if higher priority | 123 | int skip_count; /* Number of times skipped if higher priority |
163 | thread was running */ | 124 | thread was running */ |
164 | unsigned char base_priority; /* Base priority (set explicitly during | 125 | unsigned char base_priority; /* Base priority (set explicitly during |
165 | creation or thread_set_priority) */ | 126 | creation or thread_set_priority) */ |
166 | unsigned char priority; /* Scheduled priority (higher of base or | 127 | unsigned char priority; /* Scheduled priority (higher of base or |
167 | all threads blocked by this one) */ | 128 | all threads blocked by this one) */ |
168 | #endif | 129 | #endif |
169 | unsigned short stack_size; /* Size of stack in bytes */ | 130 | #ifndef HAVE_SDL_THREADS |
170 | unsigned char state; /* Thread slot state (STATE_*) */ | 131 | unsigned short stack_size; /* Size of stack in bytes */ |
132 | #endif | ||
133 | unsigned char state; /* Thread slot state (STATE_*) */ | ||
171 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | 134 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
172 | unsigned char cpu_boost; /* CPU frequency boost flag */ | 135 | unsigned char cpu_boost; /* CPU frequency boost flag */ |
173 | #endif | 136 | #endif |
174 | #ifdef HAVE_IO_PRIORITY | 137 | #ifdef HAVE_IO_PRIORITY |
175 | unsigned char io_priority; | 138 | unsigned char io_priority; |
176 | #endif | 139 | #endif |
177 | }; | 140 | }; |
178 | 141 | ||
142 | /* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */ | ||
143 | #define THREAD_ID_VERSION_SHIFT 8 | ||
144 | #define THREAD_ID_VERSION_MASK 0xffffff00 | ||
145 | #define THREAD_ID_SLOT_MASK 0x000000ff | ||
146 | #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) | ||
147 | #define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK) | ||
148 | |||
149 | #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull) | ||
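A worked example of the ID layout above, with illustrative values:

    /* Slot 5, first use: THREAD_ID_INIT(5) == 0x00000105 (version 1, slot 5).
     * After new_thread_id() the same slot reads 0x00000205 (version 2), so a
     * stale handle of 0x00000105 no longer matches, while THREAD_ID_SLOT()
     * still yields 5 for either value. */
    uint32_t id    = THREAD_ID_INIT(5);      /* 0x00000105 */
    unsigned int n = THREAD_ID_SLOT(id);     /* 5 */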
150 | |||
179 | /* Information kept for each core | 151 | /* Information kept for each core |
180 | * Members are arranged for the same reason as in thread_entry | 152 | * Members are arranged for the same reason as in thread_entry |
181 | */ | 153 | */ |
@@ -183,53 +155,97 @@ struct core_entry | |||
183 | { | 155 | { |
184 | /* "Active" lists - core is constantly active on these and are never | 156 | /* "Active" lists - core is constantly active on these and are never |
185 | locked and interrupts do not access them */ | 157 | locked and interrupts do not access them */ |
186 | struct thread_entry *running; /* threads that are running (RTR) */ | 158 | struct __rtr_queue rtr; /* Threads that are runnable */ |
187 | struct thread_entry *timeout; /* threads that are on a timeout before | 159 | struct __tmo_queue tmo; /* Threads on a bounded wait */ |
188 | running again */ | 160 | struct thread_entry *running; /* Currently running thread */ |
189 | struct thread_entry *block_task; /* Task going off running list */ | ||
190 | #ifdef HAVE_PRIORITY_SCHEDULING | 161 | #ifdef HAVE_PRIORITY_SCHEDULING |
191 | struct priority_distribution rtr; /* Summary of running and ready-to-run | 162 | struct priority_distribution rtr_dist; /* Summary of runnables */ |
192 | threads */ | ||
193 | #endif | 163 | #endif |
194 | long next_tmo_check; /* soonest time to check tmo threads */ | 164 | long next_tmo_check; /* Next due timeout check */ |
195 | #ifdef HAVE_CORELOCK_OBJECT | 165 | #if NUM_CORES > 1 |
196 | struct thread_blk_ops blk_ops; /* operations to perform when | 166 | struct corelock rtr_cl; /* Lock for rtr list */ |
197 | blocking a thread */ | ||
198 | struct corelock rtr_cl; /* Lock for rtr list */ | ||
199 | #endif /* NUM_CORES */ | 167 | #endif /* NUM_CORES */ |
200 | }; | 168 | }; |
201 | 169 | ||
202 | /* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */ | 170 | /* Hide a few scheduler details from itself to make allocation more flexible */ |
203 | #define THREAD_ID_VERSION_SHIFT 8 | 171 | #define __main_thread_name \ |
204 | #define THREAD_ID_VERSION_MASK 0xffffff00 | 172 | ({ extern const char __main_thread_name_str[]; \ |
205 | #define THREAD_ID_SLOT_MASK 0x000000ff | 173 | __main_thread_name_str; }) |
206 | #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) | 174 | |
207 | #define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK) | 175 | static FORCE_INLINE |
176 | void * __get_main_stack(size_t *stacksize) | ||
177 | { | ||
178 | #if (CONFIG_PLATFORM & PLATFORM_NATIVE) | ||
179 | extern uintptr_t stackbegin[]; | ||
180 | extern uintptr_t stackend[]; | ||
181 | #else | ||
182 | extern uintptr_t *stackbegin; | ||
183 | extern uintptr_t *stackend; | ||
184 | #endif | ||
185 | *stacksize = (uintptr_t)stackend - (uintptr_t)stackbegin; | ||
186 | return stackbegin; | ||
187 | } | ||
208 | 188 | ||
209 | /* Thread locking */ | 189 | void format_thread_name(char *buf, size_t bufsize, |
190 | const struct thread_entry *thread); | ||
191 | |||
192 | static FORCE_INLINE | ||
193 | struct core_entry * __core_id_entry(unsigned int core) | ||
194 | { | ||
210 | #if NUM_CORES > 1 | 195 | #if NUM_CORES > 1 |
211 | #define LOCK_THREAD(thread) \ | 196 | extern struct core_entry * __cores[NUM_CORES]; |
212 | ({ corelock_lock(&(thread)->slot_cl); }) | 197 | return __cores[core]; |
213 | #define TRY_LOCK_THREAD(thread) \ | 198 | #else |
214 | ({ corelock_try_lock(&(thread)->slot_cl); }) | 199 | extern struct core_entry __cores[NUM_CORES]; |
215 | #define UNLOCK_THREAD(thread) \ | 200 | return &__cores[core]; |
216 | ({ corelock_unlock(&(thread)->slot_cl); }) | 201 | #endif |
217 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ | 202 | } |
218 | ({ unsigned int _core = (thread)->core; \ | ||
219 | cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \ | ||
220 | cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) | ||
221 | #else /* NUM_CORES == 1*/ | ||
222 | #define LOCK_THREAD(thread) \ | ||
223 | ({ (void)(thread); }) | ||
224 | #define TRY_LOCK_THREAD(thread) \ | ||
225 | ({ (void)(thread); }) | ||
226 | #define UNLOCK_THREAD(thread) \ | ||
227 | ({ (void)(thread); }) | ||
228 | #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ | ||
229 | ({ (void)(thread); }) | ||
230 | #endif /* NUM_CORES */ | ||
231 | 203 | ||
232 | #define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull) | 204 | #define __running_self_entry() \ |
205 | __core_id_entry(CURRENT_CORE)->running | ||
206 | |||
207 | static FORCE_INLINE | ||
208 | struct thread_entry * __thread_slot_entry(unsigned int slotnum) | ||
209 | { | ||
210 | extern struct thread_entry * __threads[MAXTHREADS]; | ||
211 | return __threads[slotnum]; | ||
212 | } | ||
213 | |||
214 | #define __thread_id_entry(id) \ | ||
215 | __thread_slot_entry(THREAD_ID_SLOT(id)) | ||
216 | |||
217 | #define THREAD_FROM(p, member) \ | ||
218 | container_of(p, struct thread_entry, member) | ||
219 | |||
220 | #define RTR_EMPTY(rtrp) \ | ||
221 | ({ (rtrp)->head == NULL; }) | ||
222 | |||
223 | #define RTR_THREAD_FIRST(rtrp) \ | ||
224 | ({ THREAD_FROM((rtrp)->head, rtr); }) | ||
225 | |||
226 | #define RTR_THREAD_NEXT(thread) \ | ||
227 | ({ THREAD_FROM((thread)->rtr.next, rtr); }) | ||
228 | |||
229 | #define TMO_THREAD_FIRST(tmop) \ | ||
230 | ({ struct __tmo_queue *__tmop = (tmop); \ | ||
231 | __tmop->head ? THREAD_FROM(__tmop->head, tmo) : NULL; }) | ||
232 | |||
233 | #define TMO_THREAD_NEXT(thread) \ | ||
234 | ({ struct __tmo_queue_node *__next = (thread)->tmo.next; \ | ||
235 | __next ? THREAD_FROM(__next, tmo) : NULL; }) | ||
236 | |||
237 | #define WQ_THREAD_FIRST(wqp) \ | ||
238 | ({ struct __wait_queue *__wqp = (wqp); \ | ||
239 | __wqp->head ? THREAD_FROM(__wqp->head, wq) : NULL; }) | ||
240 | |||
241 | #define WQ_THREAD_NEXT(thread) \ | ||
242 | ({ struct __wait_queue_node *__next = (thread)->wq.next; \ | ||
243 | __next ? THREAD_FROM(__next, wq) : NULL; }) | ||
244 | |||
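The WQ_*/RTR_*/TMO_* accessors above hide the underlying ll/lld/lldc node types from the scheduler proper. Read-only traversal of a wait queue then looks like this sketch; the caller must hold the owning object's lock, and the helper name is illustrative.

    /* Count the threads blocked on a wait queue (illustrative helper only). */
    static inline unsigned int wait_queue_count(struct __wait_queue *wqp)
    {
        unsigned int n = 0;
        for (struct thread_entry *t = WQ_THREAD_FIRST(wqp);
             t != NULL;
             t = WQ_THREAD_NEXT(t))
            n++;
        return n;
    }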
245 | void thread_alloc_init(void) INIT_ATTR; | ||
246 | struct thread_entry * thread_alloc(void); | ||
247 | void thread_free(struct thread_entry *thread); | ||
248 | void new_thread_id(struct thread_entry *thread); | ||
233 | 249 | ||
234 | /* Switch to next runnable thread */ | 250 | /* Switch to next runnable thread */ |
235 | void switch_thread(void); | 251 | void switch_thread(void); |
@@ -237,7 +253,21 @@ void switch_thread(void); | |||
237 | * next tick) */ | 253 | * next tick) */ |
238 | void sleep_thread(int ticks); | 254 | void sleep_thread(int ticks); |
239 | /* Blocks the current thread on a thread queue (< 0 == infinite) */ | 255 | /* Blocks the current thread on a thread queue (< 0 == infinite) */ |
240 | void block_thread(struct thread_entry *current, int timeout); | 256 | void block_thread_(struct thread_entry *current, int timeout); |
257 | |||
258 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
259 | #define block_thread(thread, timeout, __wqp, bl) \ | ||
260 | ({ struct thread_entry *__t = (thread); \ | ||
261 | __t->wqp = (__wqp); \ | ||
262 | if (!__builtin_constant_p(bl) || (bl)) \ | ||
263 | __t->blocker = (bl); \ | ||
264 | block_thread_(__t, (timeout)); }) | ||
265 | #else | ||
266 | #define block_thread(thread, timeout, __wqp, bl...) \ | ||
267 | ({ struct thread_entry *__t = (thread); \ | ||
268 | __t->wqp = (__wqp); \ | ||
269 | block_thread_(__t, (timeout)); }) | ||
270 | #endif | ||
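block_thread() now takes the wait queue (and, with priority scheduling, the blocker) explicitly instead of the old current->bqp/obj_cl bookkeeping; the semaphore_wait() hunk earlier in this change is the model caller. Condensed below, with a hypothetical object 'obj' standing in for any kernel object.

    /* Sketch of the blocking convention: object corelock held, IRQs off. */
    struct thread_entry *current = __running_self_entry();
    block_thread(current, timeout, &obj->queue, NULL);  /* NULL = no PI blocker */
    corelock_unlock(&obj->cl);
    switch_thread();
    /* After return, wait_queue_ptr(current) == NULL means an explicit wake;
     * otherwise the timeout fired and the caller must clean up itself. */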
241 | 271 | ||
242 | /* Return bit flags for thread wakeup */ | 272 | /* Return bit flags for thread wakeup */ |
243 | #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */ | 273 | #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */ |
@@ -246,7 +276,7 @@ void block_thread(struct thread_entry *current, int timeout); | |||
246 | higher priority than current were woken) */ | 276 | higher priority than current were woken) */ |
247 | 277 | ||
248 | /* A convenience function for waking an entire queue of threads. */ | 278 | /* A convenience function for waking an entire queue of threads. */ |
249 | unsigned int thread_queue_wake(struct thread_entry **list); | 279 | unsigned int wait_queue_wake(struct __wait_queue *wqp); |
250 | 280 | ||
251 | /* Wakeup a thread at the head of a list */ | 281 | /* Wakeup a thread at the head of a list */ |
252 | enum wakeup_thread_protocol | 282 | enum wakeup_thread_protocol |
@@ -257,36 +287,139 @@ enum wakeup_thread_protocol | |||
257 | WAKEUP_TRANSFER_MULTI, | 287 | WAKEUP_TRANSFER_MULTI, |
258 | }; | 288 | }; |
259 | 289 | ||
260 | unsigned int wakeup_thread_(struct thread_entry **list | 290 | unsigned int wakeup_thread_(struct thread_entry *thread |
261 | IF_PRIO(, enum wakeup_thread_protocol proto)); | 291 | IF_PRIO(, enum wakeup_thread_protocol proto)); |
262 | 292 | ||
263 | #ifdef HAVE_PRIORITY_SCHEDULING | 293 | #ifdef HAVE_PRIORITY_SCHEDULING |
264 | #define wakeup_thread(list, proto) \ | 294 | #define wakeup_thread(thread, proto) \ |
265 | wakeup_thread_((list), (proto)) | 295 | wakeup_thread_((thread), (proto)) |
266 | #else /* !HAVE_PRIORITY_SCHEDULING */ | 296 | #else |
267 | #define wakeup_thread(list, proto...) \ | 297 | #define wakeup_thread(thread, proto...) \ |
268 | wakeup_thread_((list)); | 298 | wakeup_thread_((thread)); |
269 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 299 | #endif |
270 | 300 | ||
271 | #ifdef HAVE_IO_PRIORITY | 301 | #ifdef RB_PROFILE |
272 | void thread_set_io_priority(unsigned int thread_id, int io_priority); | 302 | void profile_thread(void); |
273 | int thread_get_io_priority(unsigned int thread_id); | ||
274 | #endif /* HAVE_IO_PRIORITY */ | ||
275 | #if NUM_CORES > 1 | ||
276 | unsigned int switch_core(unsigned int new_core); | ||
277 | #endif | 303 | #endif |
278 | 304 | ||
279 | /* Return the id of the calling thread. */ | 305 | static inline void rtr_queue_init(struct __rtr_queue *rtrp) |
280 | unsigned int thread_self(void); | 306 | { |
307 | lldc_init(rtrp); | ||
308 | } | ||
309 | |||
310 | static inline void rtr_queue_make_first(struct __rtr_queue *rtrp, | ||
311 | struct thread_entry *thread) | ||
312 | { | ||
313 | rtrp->head = &thread->rtr; | ||
314 | } | ||
281 | 315 | ||
282 | /* Return the thread_entry for the calling thread */ | 316 | static inline void rtr_queue_add(struct __rtr_queue *rtrp, |
283 | struct thread_entry* thread_self_entry(void); | 317 | struct thread_entry *thread) |
318 | { | ||
319 | lldc_insert_last(rtrp, &thread->rtr); | ||
320 | } | ||
284 | 321 | ||
285 | /* Return thread entry from id */ | 322 | static inline void rtr_queue_remove(struct __rtr_queue *rtrp, |
286 | struct thread_entry *thread_id_entry(unsigned int thread_id); | 323 | struct thread_entry *thread) |
324 | { | ||
325 | lldc_remove(rtrp, &thread->rtr); | ||
326 | } | ||
287 | 327 | ||
288 | #ifdef RB_PROFILE | 328 | #define TMO_NOT_QUEUED (NULL + 1) |
289 | void profile_thread(void); | 329 | |
330 | static inline bool tmo_is_queued(struct thread_entry *thread) | ||
331 | { | ||
332 | return thread->tmo.next != TMO_NOT_QUEUED; | ||
333 | } | ||
334 | |||
335 | static inline void tmo_set_dequeued(struct thread_entry *thread) | ||
336 | { | ||
337 | thread->tmo.next = TMO_NOT_QUEUED; | ||
338 | } | ||
339 | |||
340 | static inline void tmo_queue_init(struct __tmo_queue *tmop) | ||
341 | { | ||
342 | ll_init(tmop); | ||
343 | } | ||
344 | |||
345 | static inline void tmo_queue_expire(struct __tmo_queue *tmop, | ||
346 | struct thread_entry *prev, | ||
347 | struct thread_entry *thread) | ||
348 | { | ||
349 | ll_remove_next(tmop, prev ? &prev->tmo : NULL); | ||
350 | tmo_set_dequeued(thread); | ||
351 | } | ||
352 | |||
353 | static inline void tmo_queue_remove(struct __tmo_queue *tmop, | ||
354 | struct thread_entry *thread) | ||
355 | { | ||
356 | if (tmo_is_queued(thread)) | ||
357 | { | ||
358 | ll_remove(tmop, &thread->tmo); | ||
359 | tmo_set_dequeued(thread); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | static inline void tmo_queue_register(struct __tmo_queue *tmop, | ||
364 | struct thread_entry *thread) | ||
365 | { | ||
366 | if (!tmo_is_queued(thread)) | ||
367 | ll_insert_last(tmop, &thread->tmo); | ||
368 | } | ||
369 | |||
370 | static inline void wait_queue_init(struct __wait_queue *wqp) | ||
371 | { | ||
372 | lld_init(wqp); | ||
373 | } | ||
374 | |||
375 | static inline void wait_queue_register(struct thread_entry *thread) | ||
376 | { | ||
377 | lld_insert_last(thread->wqp, &thread->wq); | ||
378 | } | ||
379 | |||
380 | static inline struct __wait_queue * | ||
381 | wait_queue_ptr(struct thread_entry *thread) | ||
382 | { | ||
383 | return thread->wqp; | ||
384 | } | ||
385 | |||
386 | static inline struct __wait_queue * | ||
387 | wait_queue_remove(struct thread_entry *thread) | ||
388 | { | ||
389 | struct __wait_queue *wqp = thread->wqp; | ||
390 | thread->wqp = NULL; | ||
391 | lld_remove(wqp, &thread->wq); | ||
392 | return wqp; | ||
393 | } | ||
394 | |||
395 | static inline struct __wait_queue * | ||
396 | wait_queue_try_remove(struct thread_entry *thread) | ||
397 | { | ||
398 | struct __wait_queue *wqp = thread->wqp; | ||
399 | if (wqp) | ||
400 | { | ||
401 | thread->wqp = NULL; | ||
402 | lld_remove(wqp, &thread->wq); | ||
403 | } | ||
404 | |||
405 | return wqp; | ||
406 | } | ||
407 | |||
408 | static inline void blocker_init(struct blocker *bl) | ||
409 | { | ||
410 | bl->thread = NULL; | ||
411 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
412 | bl->priority = PRIORITY_IDLE; | ||
413 | #endif | ||
414 | } | ||
415 | |||
416 | static inline void blocker_splay_init(struct blocker_splay *blsplay) | ||
417 | { | ||
418 | blocker_init(&blsplay->blocker); | ||
419 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
420 | threadbit_clear(&blsplay->mask); | ||
290 | #endif | 421 | #endif |
422 | corelock_init(&blsplay->cl); | ||
423 | } | ||
291 | 424 | ||
292 | #endif /* THREAD_INTERNAL_H */ | 425 | #endif /* THREAD_INTERNAL_H */ |
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c index c148f6b76e..b916c3b521 100644 --- a/firmware/kernel/thread.c +++ b/firmware/kernel/thread.c | |||
@@ -37,11 +37,6 @@ | |||
37 | #endif | 37 | #endif |
38 | #include "core_alloc.h" | 38 | #include "core_alloc.h" |
39 | 39 | ||
40 | /**************************************************************************** | ||
41 | * ATTENTION!! * | ||
42 | * See notes below on implementing processor-specific portions! * | ||
43 | ***************************************************************************/ | ||
44 | |||
45 | /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ | 40 | /* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ |
46 | #ifdef DEBUG | 41 | #ifdef DEBUG |
47 | #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */ | 42 | #define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */ |
@@ -49,7 +44,11 @@ | |||
49 | #define THREAD_EXTRA_CHECKS 0 | 44 | #define THREAD_EXTRA_CHECKS 0 |
50 | #endif | 45 | #endif |
51 | 46 | ||
52 | /** | 47 | /**************************************************************************** |
48 | * ATTENTION!! * | ||
49 | * See notes below on implementing processor-specific portions! * | ||
50 | **************************************************************************** | ||
51 | * | ||
53 | * General locking order to guarantee progress. Order must be observed but | 52 | * General locking order to guarantee progress. Order must be observed but |
54 | * all stages are not nescessarily obligatory. Going from 1) to 3) is | 53 | * all stages are not nescessarily obligatory. Going from 1) to 3) is |
55 | * perfectly legal. | 54 | * perfectly legal. |
@@ -66,14 +65,14 @@ | |||
66 | * unlock and the other processor's handler may proceed at that time. Not | 65 | * unlock and the other processor's handler may proceed at that time. Not |
67 | * nescessary when the resource in question is definitely not available to | 66 | * nescessary when the resource in question is definitely not available to |
68 | * interrupt handlers. | 67 | * interrupt handlers. |
69 | * | 68 | * |
70 | * 2) Kernel Object | 69 | * 2) Kernel Object |
71 | * 1) May be needed beforehand if the kernel object allows dual-use such as | 70 | * 1) May be needed beforehand if the kernel object allows dual-use such as |
72 | * event queues. The kernel object must have a scheme to protect itself from | 71 | * event queues. The kernel object must have a scheme to protect itself from |
73 | * access by another processor and is responsible for serializing the calls | 72 | * access by another processor and is responsible for serializing the calls |
74 | * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each | 73 | * to block_thread and wakeup_thread both to themselves and to each other. |
75 | * other. Objects' queues are also protected here. | 74 | * Objects' queues are also protected here. |
76 | * | 75 | * |
77 | * 3) Thread Slot | 76 | * 3) Thread Slot |
78 | * This locks access to the thread's slot such that its state cannot be | 77 | * This locks access to the thread's slot such that its state cannot be |
79 | * altered by another processor when a state change is in progress such as | 78 | * altered by another processor when a state change is in progress such as |
@@ -121,68 +120,62 @@ | |||
121 | * available then some careful non-blocking synchonization is needed (as on | 120 | * available then some careful non-blocking synchonization is needed (as on |
122 | * PP targets at the moment). | 121 | * PP targets at the moment). |
123 | *--------------------------------------------------------------------------- | 122 | *--------------------------------------------------------------------------- |
123 | * | ||
124 | * | ||
125 | *--------------------------------------------------------------------------- | ||
126 | * Priority distribution structure (one category for each possible priority): | ||
127 | * | ||
128 | * +----+----+----+ ... +------+ | ||
129 | * hist: | F0 | F1 | F2 | | Fn-1 | | ||
130 | * +----+----+----+ ... +------+ | ||
131 | * mask: | b0 | b1 | b2 | | bn-1 | | ||
132 | * +----+----+----+ ... +------+ | ||
133 | * | ||
134 | * F = count of threads at priority category n (frequency) | ||
135 | * b = bitmask of non-zero priority categories (occupancy) | ||
136 | * | ||
137 | * / if H[n] != 0 : 1 | ||
138 | * b[n] = | | ||
139 | * \ else : 0 | ||
140 | * | ||
141 | *--------------------------------------------------------------------------- | ||
142 | * Basic priority inheritance protocol (PIP): | ||
143 | * | ||
144 | * Mn = mutex n, Tn = thread n | ||
145 | * | ||
146 | * A lower priority thread inherits the priority of the highest priority | ||
147 | * thread blocked waiting for it to complete an action (such as release a | ||
148 | * mutex or respond to a message via queue_send): | ||
149 | * | ||
150 | * 1) T2->M1->T1 | ||
151 | * | ||
152 | * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher | ||
153 | * priority than T1 then T1 inherits the priority of T2. | ||
154 | * | ||
155 | * 2) T3 | ||
156 | * \/ | ||
157 | * T2->M1->T1 | ||
158 | * | ||
159 | * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so | ||
160 | * T1 inherits the higher of T2 and T3. | ||
161 | * | ||
162 | * 3) T3->M2->T2->M1->T1 | ||
163 | * | ||
164 | * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2, | ||
165 | * then T1 inherits the priority of T3 through T2. | ||
166 | * | ||
167 | * Blocking chains can grow arbitrarily complex (though it's best that they | ||
168 | * not form at all very often :) and build-up from these units. | ||
169 | *--------------------------------------------------------------------------- | ||
124 | */ | 170 | */ |
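The add/subtract operations that the rtr_* wrappers below map onto must keep the histogram and occupancy mask consistent. A sketch of the idea; the field and helper names follow the comment and the priobit_*/threadbit_* bitarray convention and are assumptions, not necessarily the exact implementation.

    /* Illustrative only: one counter per priority level plus a bitmap of
     * non-empty levels, so the best runnable priority is priobit_ffs(&mask). */
    static inline void example_prio_add(struct priority_distribution *pd,
                                        unsigned int priority)
    {
        if (pd->hist[priority]++ == 0)            /* level becomes occupied */
            priobit_set_bit(&pd->mask, priority);
    }

    static inline void example_prio_subtract(struct priority_distribution *pd,
                                             unsigned int priority)
    {
        if (--pd->hist[priority] == 0)            /* level becomes empty */
            priobit_clear_bit(&pd->mask, priority);
    }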
125 | 171 | static FORCE_INLINE void core_sleep(IF_COP_VOID(unsigned int core)); | |
126 | /* Cast to the machine pointer size, whose size could be < 4 or > 32 | 172 | static FORCE_INLINE void store_context(void* addr); |
127 | * (someday :). */ | 173 | static FORCE_INLINE void load_context(const void* addr); |
128 | static struct core_entry cores[NUM_CORES] IBSS_ATTR; | ||
129 | struct thread_entry threads[MAXTHREADS] IBSS_ATTR; | ||
130 | |||
131 | static const char main_thread_name[] = "main"; | ||
132 | #if (CONFIG_PLATFORM & PLATFORM_NATIVE) | ||
133 | extern uintptr_t stackbegin[]; | ||
134 | extern uintptr_t stackend[]; | ||
135 | #else | ||
136 | extern uintptr_t *stackbegin; | ||
137 | extern uintptr_t *stackend; | ||
138 | #endif | ||
139 | |||
140 | static inline void core_sleep(IF_COP_VOID(unsigned int core)) | ||
141 | __attribute__((always_inline)); | ||
142 | |||
143 | void check_tmo_threads(void) | ||
144 | __attribute__((noinline)); | ||
145 | |||
146 | static inline void block_thread_on_l(struct thread_entry *thread, unsigned state) | ||
147 | __attribute__((always_inline)); | ||
148 | |||
149 | static void add_to_list_tmo(struct thread_entry *thread) | ||
150 | __attribute__((noinline)); | ||
151 | |||
152 | static void core_schedule_wakeup(struct thread_entry *thread) | ||
153 | __attribute__((noinline)); | ||
154 | |||
155 | #if NUM_CORES > 1 | ||
156 | static inline void run_blocking_ops( | ||
157 | unsigned int core, struct thread_entry *thread) | ||
158 | __attribute__((always_inline)); | ||
159 | #endif | ||
160 | |||
161 | static void thread_stkov(struct thread_entry *thread) | ||
162 | __attribute__((noinline)); | ||
163 | |||
164 | static inline void store_context(void* addr) | ||
165 | __attribute__((always_inline)); | ||
166 | |||
167 | static inline void load_context(const void* addr) | ||
168 | __attribute__((always_inline)); | ||
169 | |||
170 | #if NUM_CORES > 1 | ||
171 | static void thread_final_exit_do(struct thread_entry *current) | ||
172 | __attribute__((noinline)) NORETURN_ATTR USED_ATTR; | ||
173 | #else | ||
174 | static inline void thread_final_exit(struct thread_entry *current) | ||
175 | __attribute__((always_inline)) NORETURN_ATTR; | ||
176 | #endif | ||
177 | |||
178 | void switch_thread(void) | ||
179 | __attribute__((noinline)); | ||
180 | 174 | ||
181 | /**************************************************************************** | 175 | /**************************************************************************** |
182 | * Processor/OS-specific section - include necessary core support | 176 | * Processor/OS-specific section - include necessary core support |
183 | */ | 177 | */ |
184 | 178 | ||
185 | |||
186 | #include "asm/thread.c" | 179 | #include "asm/thread.c" |
187 | 180 | ||
188 | #if defined (CPU_PP) | 181 | #if defined (CPU_PP) |
@@ -193,20 +186,17 @@ void switch_thread(void) | |||
193 | * End Processor-specific section | 186 | * End Processor-specific section |
194 | ***************************************************************************/ | 187 | ***************************************************************************/ |
195 | 188 | ||
196 | static NO_INLINE | 189 | static NO_INLINE NORETURN_ATTR |
197 | void thread_panicf(const char *msg, struct thread_entry *thread) | 190 | void thread_panicf(const char *msg, struct thread_entry *thread) |
198 | { | 191 | { |
199 | IF_COP( const unsigned int core = thread->core; ) | 192 | IF_COP( const unsigned int core = thread->core; ) |
200 | static char namebuf[sizeof (((struct thread_debug_info *)0)->name)]; | 193 | static char name[sizeof (((struct thread_debug_info *)0)->name)]; |
201 | const char *name = thread->name; | 194 | format_thread_name(name, sizeof (name), thread); |
202 | if (!name) | ||
203 | name = ""; | ||
204 | snprintf(namebuf, sizeof (namebuf), *name ? "%s" : "%s%08lX", | ||
205 | name, (unsigned long)thread->id); | ||
206 | panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); | 195 | panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); |
196 | while (1); | ||
207 | } | 197 | } |
208 | 198 | ||
209 | static void thread_stkov(struct thread_entry *thread) | 199 | static NO_INLINE void thread_stkov(struct thread_entry *thread) |
210 | { | 200 | { |
211 | thread_panicf("Stkov", thread); | 201 | thread_panicf("Stkov", thread); |
212 | } | 202 | } |
@@ -218,36 +208,51 @@ static void thread_stkov(struct thread_entry *thread) | |||
218 | ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) | 208 | ({ if (!({ exp; })) thread_panicf((msg), (thread)); }) |
219 | #else | 209 | #else |
220 | #define THREAD_PANICF(msg, thread) \ | 210 | #define THREAD_PANICF(msg, thread) \ |
221 | do {} while (0) | 211 | do {} while (1) |
222 | #define THREAD_ASSERT(exp, msg, thread) \ | 212 | #define THREAD_ASSERT(exp, msg, thread) \ |
223 | do {} while (0) | 213 | do {} while (0) |
224 | #endif /* THREAD_EXTRA_CHECKS */ | 214 | #endif /* THREAD_EXTRA_CHECKS */ |
225 | 215 | ||
216 | /* Thread locking */ | ||
217 | #if NUM_CORES > 1 | ||
218 | #define LOCK_THREAD(thread) \ | ||
219 | ({ corelock_lock(&(thread)->slot_cl); }) | ||
220 | #define TRY_LOCK_THREAD(thread) \ | ||
221 | ({ corelock_try_lock(&(thread)->slot_cl); }) | ||
222 | #define UNLOCK_THREAD(thread) \ | ||
223 | ({ corelock_unlock(&(thread)->slot_cl); }) | ||
224 | #else /* NUM_CORES == 1*/ | ||
225 | #define LOCK_THREAD(thread) \ | ||
226 | ({ (void)(thread); }) | ||
227 | #define TRY_LOCK_THREAD(thread) \ | ||
228 | ({ (void)(thread); }) | ||
229 | #define UNLOCK_THREAD(thread) \ | ||
230 | ({ (void)(thread); }) | ||
231 | #endif /* NUM_CORES */ | ||
232 | |||
226 | /* RTR list */ | 233 | /* RTR list */ |
227 | #define RTR_LOCK(core) \ | 234 | #define RTR_LOCK(corep) \ |
228 | ({ corelock_lock(&cores[core].rtr_cl); }) | 235 | corelock_lock(&(corep)->rtr_cl) |
229 | #define RTR_UNLOCK(core) \ | 236 | #define RTR_UNLOCK(corep) \ |
230 | ({ corelock_unlock(&cores[core].rtr_cl); }) | 237 | corelock_unlock(&(corep)->rtr_cl) |
231 | 238 | ||
232 | #ifdef HAVE_PRIORITY_SCHEDULING | 239 | #ifdef HAVE_PRIORITY_SCHEDULING |
233 | #define rtr_add_entry(core, priority) \ | 240 | #define rtr_add_entry(corep, priority) \ |
234 | prio_add_entry(&cores[core].rtr, (priority)) | 241 | prio_add_entry(&(corep)->rtr_dist, (priority)) |
235 | 242 | #define rtr_subtract_entry(corep, priority) \ | |
236 | #define rtr_subtract_entry(core, priority) \ | 243 | prio_subtract_entry(&(corep)->rtr_dist, (priority)) |
237 | prio_subtract_entry(&cores[core].rtr, (priority)) | 244 | #define rtr_move_entry(corep, from, to) \ |
238 | 245 | prio_move_entry(&(corep)->rtr_dist, (from), (to)) | |
239 | #define rtr_move_entry(core, from, to) \ | 246 | #else /* !HAVE_PRIORITY_SCHEDULING */ |
240 | prio_move_entry(&cores[core].rtr, (from), (to)) | 247 | #define rtr_add_entry(corep, priority) \ |
241 | #else | 248 | do {} while (0) |
242 | #define rtr_add_entry(core, priority) | 249 | #define rtr_subtract_entry(corep, priority) \ |
243 | #define rtr_add_entry_inl(core, priority) | 250 | do {} while (0) |
244 | #define rtr_subtract_entry(core, priority) | 251 | #define rtr_move_entry(corep, from, to) \ |
245 | #define rtr_subtract_entry_inl(core, priotity) | 252 | do {} while (0) |
246 | #define rtr_move_entry(core, from, to) | 253 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
247 | #define rtr_move_entry_inl(core, from, to) | ||
248 | #endif | ||
249 | 254 | ||
250 | static inline void thread_store_context(struct thread_entry *thread) | 255 | static FORCE_INLINE void thread_store_context(struct thread_entry *thread) |
251 | { | 256 | { |
252 | #if (CONFIG_PLATFORM & PLATFORM_HOSTED) | 257 | #if (CONFIG_PLATFORM & PLATFORM_HOSTED) |
253 | thread->__errno = errno; | 258 | thread->__errno = errno; |
@@ -255,7 +260,7 @@ static inline void thread_store_context(struct thread_entry *thread) | |||
255 | store_context(&thread->context); | 260 | store_context(&thread->context); |
256 | } | 261 | } |
257 | 262 | ||
258 | static inline void thread_load_context(struct thread_entry *thread) | 263 | static FORCE_INLINE void thread_load_context(struct thread_entry *thread) |
259 | { | 264 | { |
260 | load_context(&thread->context); | 265 | load_context(&thread->context); |
261 | #if (CONFIG_PLATFORM & PLATFORM_HOSTED) | 266 | #if (CONFIG_PLATFORM & PLATFORM_HOSTED) |
@@ -263,272 +268,31 @@ static inline void thread_load_context(struct thread_entry *thread) | |||
263 | #endif | 268 | #endif |
264 | } | 269 | } |
265 | 270 | ||
266 | static inline unsigned int should_switch_tasks(void) | 271 | static FORCE_INLINE unsigned int |
272 | should_switch_tasks(struct thread_entry *thread) | ||
267 | { | 273 | { |
268 | unsigned int result = THREAD_OK; | ||
269 | |||
270 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
271 | struct thread_entry *current = cores[CURRENT_CORE].running; | ||
272 | if (current && | ||
273 | priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask) | ||
274 | < current->priority) | ||
275 | { | ||
276 | /* There is a thread ready to run of higher priority on the same | ||
277 | * core as the current one; recommend a task switch. */ | ||
278 | result |= THREAD_SWITCH; | ||
279 | } | ||
280 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
281 | |||
282 | return result; | ||
283 | } | ||
284 | |||
285 | #ifdef HAVE_PRIORITY_SCHEDULING | 274 | #ifdef HAVE_PRIORITY_SCHEDULING |
286 | /*--------------------------------------------------------------------------- | 275 | const unsigned int core = CURRENT_CORE; |
287 | * Locks the thread registered as the owner of the block and makes sure it | ||
288 | * didn't change in the meantime | ||
289 | *--------------------------------------------------------------------------- | ||
290 | */ | ||
291 | #if NUM_CORES == 1 | ||
292 | static inline struct thread_entry * lock_blocker_thread(struct blocker *bl) | ||
293 | { | ||
294 | return bl->thread; | ||
295 | } | ||
296 | #else /* NUM_CORES > 1 */ | ||
297 | static struct thread_entry * lock_blocker_thread(struct blocker *bl) | ||
298 | { | ||
299 | /* The blocker thread may change during the process of trying to | ||
300 | capture it */ | ||
301 | while (1) | ||
302 | { | ||
303 | struct thread_entry *t = bl->thread; | ||
304 | |||
305 | /* TRY, or else deadlocks are possible */ | ||
306 | if (!t) | ||
307 | { | ||
308 | struct blocker_splay *blsplay = (struct blocker_splay *)bl; | ||
309 | if (corelock_try_lock(&blsplay->cl)) | ||
310 | { | ||
311 | if (!bl->thread) | ||
312 | return NULL; /* Still multi */ | ||
313 | |||
314 | corelock_unlock(&blsplay->cl); | ||
315 | } | ||
316 | } | ||
317 | else | ||
318 | { | ||
319 | if (TRY_LOCK_THREAD(t)) | ||
320 | { | ||
321 | if (bl->thread == t) | ||
322 | return t; | ||
323 | |||
324 | UNLOCK_THREAD(t); | ||
325 | } | ||
326 | } | ||
327 | } | ||
328 | } | ||
329 | #endif /* NUM_CORES */ | ||
330 | |||
331 | static inline void unlock_blocker_thread(struct blocker *bl) | ||
332 | { | ||
333 | #if NUM_CORES > 1 | 276 | #if NUM_CORES > 1 |
334 | struct thread_entry *blt = bl->thread; | 277 | /* Forget about it if different CPU */ |
335 | if (blt) | 278 | if (thread->core != core) |
336 | UNLOCK_THREAD(blt); | 279 | return THREAD_OK; |
337 | else | 280 | #endif |
338 | corelock_unlock(&((struct blocker_splay *)bl)->cl); | 281 | /* Just woke something therefore a thread is on the run queue */ |
339 | #endif /* NUM_CORES > 1*/ | 282 | struct thread_entry *current = |
340 | (void)bl; | 283 | RTR_THREAD_FIRST(&__core_id_entry(core)->rtr); |
341 | } | 284 | if (LIKELY(thread->priority >= current->priority)) |
285 | return THREAD_OK; | ||
286 | |||
287 | /* There is a thread ready to run of higher priority on the same | ||
288 | * core as the current one; recommend a task switch. */ | ||
289 | return THREAD_OK | THREAD_SWITCH; | ||
290 | #else | ||
291 | return THREAD_OK; | ||
342 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 292 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
343 | |||
344 | /*--------------------------------------------------------------------------- | ||
345 | * Thread list structure - circular: | ||
346 | * +------------------------------+ | ||
347 | * | | | ||
348 | * +--+---+<-+---+<-+---+<-+---+<-+ | ||
349 | * Head->| T | | T | | T | | T | | ||
350 | * +->+---+->+---+->+---+->+---+--+ | ||
351 | * | | | ||
352 | * +------------------------------+ | ||
353 | *--------------------------------------------------------------------------- | ||
354 | */ | ||
355 | |||
356 | /*--------------------------------------------------------------------------- | ||
357 | * Adds a thread to a list of threads using "insert last". Uses the "l" | ||
358 | * links. | ||
359 | *--------------------------------------------------------------------------- | ||
360 | */ | ||
361 | static void add_to_list_l(struct thread_entry **list, | ||
362 | struct thread_entry *thread) | ||
363 | { | ||
364 | struct thread_entry *l = *list; | ||
365 | |||
366 | if (l == NULL) | ||
367 | { | ||
368 | /* Insert into unoccupied list */ | ||
369 | thread->l.prev = thread; | ||
370 | thread->l.next = thread; | ||
371 | *list = thread; | ||
372 | return; | ||
373 | } | ||
374 | |||
375 | /* Insert last */ | ||
376 | thread->l.prev = l->l.prev; | ||
377 | thread->l.next = l; | ||
378 | l->l.prev->l.next = thread; | ||
379 | l->l.prev = thread; | ||
380 | } | ||
381 | |||
382 | /*--------------------------------------------------------------------------- | ||
383 | * Removes a thread from a list of threads. Uses the "l" links. | ||
384 | *--------------------------------------------------------------------------- | ||
385 | */ | ||
386 | static void remove_from_list_l(struct thread_entry **list, | ||
387 | struct thread_entry *thread) | ||
388 | { | ||
389 | struct thread_entry *prev, *next; | ||
390 | |||
391 | next = thread->l.next; | ||
392 | |||
393 | if (thread == next) | ||
394 | { | ||
395 | /* The only item */ | ||
396 | *list = NULL; | ||
397 | return; | ||
398 | } | ||
399 | |||
400 | if (thread == *list) | ||
401 | { | ||
402 | /* List becomes next item */ | ||
403 | *list = next; | ||
404 | } | ||
405 | |||
406 | prev = thread->l.prev; | ||
407 | |||
408 | /* Fix links to jump over the removed entry. */ | ||
409 | next->l.prev = prev; | ||
410 | prev->l.next = next; | ||
411 | } | ||
412 | |||
413 | /*--------------------------------------------------------------------------- | ||
414 | * Timeout list structure - circular reverse (to make "remove item" O(1)), | ||
415 | * NULL-terminated forward (to ease the far more common forward traversal): | ||
416 | * +------------------------------+ | ||
417 | * | | | ||
418 | * +--+---+<-+---+<-+---+<-+---+<-+ | ||
419 | * Head->| T | | T | | T | | T | | ||
420 | * +---+->+---+->+---+->+---+-X | ||
421 | *--------------------------------------------------------------------------- | ||
422 | */ | ||
423 | |||
424 | /*--------------------------------------------------------------------------- | ||
425 | * Add a thread to the core's timeout list by linking the pointers in its | ||
426 | * tmo structure. | ||
427 | *--------------------------------------------------------------------------- | ||
428 | */ | ||
429 | static void add_to_list_tmo(struct thread_entry *thread) | ||
430 | { | ||
431 | struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout; | ||
432 | THREAD_ASSERT(thread->tmo.prev == NULL, | ||
433 | "add_to_list_tmo->already listed", thread); | ||
434 | |||
435 | thread->tmo.next = NULL; | ||
436 | |||
437 | if (tmo == NULL) | ||
438 | { | ||
439 | /* Insert into unoccupied list */ | ||
440 | thread->tmo.prev = thread; | ||
441 | cores[IF_COP_CORE(thread->core)].timeout = thread; | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | /* Insert Last */ | ||
446 | thread->tmo.prev = tmo->tmo.prev; | ||
447 | tmo->tmo.prev->tmo.next = thread; | ||
448 | tmo->tmo.prev = thread; | ||
449 | } | ||
450 | |||
451 | /*--------------------------------------------------------------------------- | ||
452 | * Remove a thread from the core's timeout list by unlinking the pointers in | ||
453 | * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout | ||
454 | * is cancelled. | ||
455 | *--------------------------------------------------------------------------- | ||
456 | */ | ||
457 | static void remove_from_list_tmo(struct thread_entry *thread) | ||
458 | { | ||
459 | struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout; | ||
460 | struct thread_entry *prev = thread->tmo.prev; | ||
461 | struct thread_entry *next = thread->tmo.next; | ||
462 | |||
463 | THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread); | ||
464 | |||
465 | if (next != NULL) | ||
466 | next->tmo.prev = prev; | ||
467 | |||
468 | if (thread == *list) | ||
469 | { | ||
470 | /* List becomes next item and empty if next == NULL */ | ||
471 | *list = next; | ||
472 | /* Mark as unlisted */ | ||
473 | thread->tmo.prev = NULL; | ||
474 | } | ||
475 | else | ||
476 | { | ||
477 | if (next == NULL) | ||
478 | (*list)->tmo.prev = prev; | ||
479 | prev->tmo.next = next; | ||
480 | /* Mark as unlisted */ | ||
481 | thread->tmo.prev = NULL; | ||
482 | } | ||
483 | } | 293 | } |
484 | 294 | ||
485 | #ifdef HAVE_PRIORITY_SCHEDULING | 295 | #ifdef HAVE_PRIORITY_SCHEDULING |
486 | /*--------------------------------------------------------------------------- | ||
487 | * Priority distribution structure (one category for each possible priority): | ||
488 | * | ||
489 | * +----+----+----+ ... +-----+ | ||
490 | * hist: | F0 | F1 | F2 | | F31 | | ||
491 | * +----+----+----+ ... +-----+ | ||
492 | * mask: | b0 | b1 | b2 | | b31 | | ||
493 | * +----+----+----+ ... +-----+ | ||
494 | * | ||
495 | * F = count of threads at priority category n (frequency) | ||
496 | * b = bitmask of non-zero priority categories (occupancy) | ||
497 | * | ||
498 | * / if F[n] != 0 : 1 | ||
499 | * b[n] = | | ||
500 | * \ else : 0 | ||
501 | * | ||
502 | *--------------------------------------------------------------------------- | ||
503 | * Basic priority inheritance protocol (PIP): | ||
504 | * | ||
505 | * Mn = mutex n, Tn = thread n | ||
506 | * | ||
507 | * A lower priority thread inherits the priority of the highest priority | ||
508 | * thread blocked waiting for it to complete an action (such as release a | ||
509 | * mutex or respond to a message via queue_send): | ||
510 | * | ||
511 | * 1) T2->M1->T1 | ||
512 | * | ||
513 | * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher | ||
514 | * priority than T1 then T1 inherits the priority of T2. | ||
515 | * | ||
516 | * 2) T3 | ||
517 | * \/ | ||
518 | * T2->M1->T1 | ||
519 | * | ||
520 | * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so | ||
521 | * T1 inherits the higher priority of T2 and T3. | ||
522 | * | ||
523 | * 3) T3->M2->T2->M1->T1 | ||
524 | * | ||
525 | * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2, | ||
526 | * then T1 inherits the priority of T3 through T2. | ||
527 | * | ||
528 | * Blocking chains can grow arbitrarily complex (though it's best that they | ||
529 | * not form at all very often :) and build up from these units. | ||
530 | *--------------------------------------------------------------------------- | ||
531 | */ | ||
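The comment block above (priority distribution plus the PIP walk-through) describes bookkeeping that the rest of this patch relies on via prio_add_entry(), prio_subtract_entry() and priobit_ffs(). As a rough illustrative sketch only (not part of the patch; the struct layout, array width and the names ending in _sketch are assumptions made here for clarity), the distribution amounts to:

    #include <stdint.h>

    struct prio_dist_sketch
    {
        uint32_t mask;      /* b[n]: one occupancy bit per priority category */
        uint8_t  hist[32];  /* F[n]: number of threads at priority n */
    };

    /* Returns the new count; a result of 1 means the category just became
       occupied, so its occupancy bit is set. */
    static unsigned int prio_add_entry_sketch(struct prio_dist_sketch *pd, int pr)
    {
        if (++pd->hist[pr] == 1)
            pd->mask |= 1u << pr;
        return pd->hist[pr];
    }

    /* Returns the new count; a result of 0 means the category emptied,
       so its occupancy bit is cleared. */
    static unsigned int prio_subtract_entry_sketch(struct prio_dist_sketch *pd, int pr)
    {
        if (--pd->hist[pr] == 0)
            pd->mask &= ~(1u << pr);
        return pd->hist[pr];
    }

    /* The effective priority is then the lowest-numbered set bit of mask
       (the most urgent occupied category), which is what priobit_ffs()
       is used for elsewhere in this patch. */

This is why the inherit/disinherit helpers further down compare the return values against 1 (first entry at that priority) and 0 (last entry gone) before deciding whether a thread's effective priority needs to be recomputed.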
532 | 296 | ||
533 | /*--------------------------------------------------------------------------- | 297 | /*--------------------------------------------------------------------------- |
534 | * Increment frequency at category "priority" | 298 | * Increment frequency at category "priority" |
@@ -569,25 +333,86 @@ static inline void prio_move_entry( | |||
569 | if (++pd->hist[to] == 1) | 333 | if (++pd->hist[to] == 1) |
570 | priobit_set_bit(&pd->mask, to); | 334 | priobit_set_bit(&pd->mask, to); |
571 | } | 335 | } |
336 | |||
572 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 337 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
573 | 338 | ||
574 | /*--------------------------------------------------------------------------- | 339 | /*--------------------------------------------------------------------------- |
575 | * Move a thread back to a running state on its core. | 340 | * Common init for new thread basic info |
576 | *--------------------------------------------------------------------------- | 341 | *--------------------------------------------------------------------------- |
577 | */ | 342 | */ |
578 | static void core_schedule_wakeup(struct thread_entry *thread) | 343 | static void new_thread_base_init(struct thread_entry *thread, |
344 | void **stackp, size_t *stack_sizep, | ||
345 | const char *name IF_PRIO(, int priority) | ||
346 | IF_COP(, unsigned int core)) | ||
579 | { | 347 | { |
580 | const unsigned int core = IF_COP_CORE(thread->core); | 348 | ALIGN_BUFFER(*stackp, *stack_sizep, MIN_STACK_ALIGN); |
349 | thread->stack = *stackp; | ||
350 | thread->stack_size = *stack_sizep; | ||
581 | 351 | ||
582 | RTR_LOCK(core); | 352 | thread->name = name; |
353 | wait_queue_init(&thread->queue); | ||
354 | thread->wqp = NULL; | ||
355 | tmo_set_dequeued(thread); | ||
356 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
357 | thread->skip_count = 0; | ||
358 | thread->blocker = NULL; | ||
359 | thread->base_priority = priority; | ||
360 | thread->priority = priority; | ||
361 | memset(&thread->pdist, 0, sizeof(thread->pdist)); | ||
362 | prio_add_entry(&thread->pdist, priority); | ||
363 | #endif | ||
364 | #if NUM_CORES > 1 | ||
365 | thread->core = core; | ||
366 | #endif | ||
367 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
368 | thread->cpu_boost = 0; | ||
369 | #endif | ||
370 | #ifdef HAVE_IO_PRIORITY | ||
371 | /* Default to high (foreground) priority */ | ||
372 | thread->io_priority = IO_PRIORITY_IMMEDIATE; | ||
373 | #endif | ||
374 | } | ||
583 | 375 | ||
376 | /*--------------------------------------------------------------------------- | ||
377 | * Move a thread onto the core's run queue and promote it | ||
378 | *--------------------------------------------------------------------------- | ||
379 | */ | ||
380 | static inline void core_rtr_add(struct core_entry *corep, | ||
381 | struct thread_entry *thread) | ||
382 | { | ||
383 | RTR_LOCK(corep); | ||
384 | rtr_queue_add(&corep->rtr, thread); | ||
385 | rtr_add_entry(corep, thread->priority); | ||
386 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
387 | thread->skip_count = thread->base_priority; | ||
388 | #endif | ||
584 | thread->state = STATE_RUNNING; | 389 | thread->state = STATE_RUNNING; |
390 | RTR_UNLOCK(corep); | ||
391 | } | ||
585 | 392 | ||
586 | add_to_list_l(&cores[core].running, thread); | 393 | /*--------------------------------------------------------------------------- |
587 | rtr_add_entry(core, thread->priority); | 394 | * Remove a thread from the core's run queue |
588 | 395 | *--------------------------------------------------------------------------- | |
589 | RTR_UNLOCK(core); | 396 | */ |
397 | static inline void core_rtr_remove(struct core_entry *corep, | ||
398 | struct thread_entry *thread) | ||
399 | { | ||
400 | RTR_LOCK(corep); | ||
401 | rtr_queue_remove(&corep->rtr, thread); | ||
402 | rtr_subtract_entry(corep, thread->priority); | ||
403 | /* Does not demote state */ | ||
404 | RTR_UNLOCK(corep); | ||
405 | } | ||
590 | 406 | ||
407 | /*--------------------------------------------------------------------------- | ||
408 | * Move a thread back to a running state on its core | ||
409 | *--------------------------------------------------------------------------- | ||
410 | */ | ||
411 | static NO_INLINE void core_schedule_wakeup(struct thread_entry *thread) | ||
412 | { | ||
413 | const unsigned int core = IF_COP_CORE(thread->core); | ||
414 | struct core_entry *corep = __core_id_entry(core); | ||
415 | core_rtr_add(corep, thread); | ||
591 | #if NUM_CORES > 1 | 416 | #if NUM_CORES > 1 |
592 | if (core != CURRENT_CORE) | 417 | if (core != CURRENT_CORE) |
593 | core_wake(core); | 418 | core_wake(core); |
@@ -596,17 +421,75 @@ static void core_schedule_wakeup(struct thread_entry *thread) | |||
596 | 421 | ||
597 | #ifdef HAVE_PRIORITY_SCHEDULING | 422 | #ifdef HAVE_PRIORITY_SCHEDULING |
598 | /*--------------------------------------------------------------------------- | 423 | /*--------------------------------------------------------------------------- |
424 | * Locks the thread registered as the owner of the blocker and makes sure it | ||
425 | * didn't change in the meantime | ||
426 | *--------------------------------------------------------------------------- | ||
427 | */ | ||
428 | #if NUM_CORES == 1 | ||
429 | static inline struct thread_entry * lock_blocker_thread(struct blocker *bl) | ||
430 | { | ||
431 | return bl->thread; | ||
432 | } | ||
433 | #else /* NUM_CORES > 1 */ | ||
434 | static struct thread_entry * lock_blocker_thread(struct blocker *bl) | ||
435 | { | ||
436 | /* The blocker thread may change during the process of trying to | ||
437 | capture it */ | ||
438 | while (1) | ||
439 | { | ||
440 | struct thread_entry *t = bl->thread; | ||
441 | |||
442 | /* TRY, or else deadlocks are possible */ | ||
443 | if (!t) | ||
444 | { | ||
445 | struct blocker_splay *blsplay = (struct blocker_splay *)bl; | ||
446 | if (corelock_try_lock(&blsplay->cl)) | ||
447 | { | ||
448 | if (!bl->thread) | ||
449 | return NULL; /* Still multi */ | ||
450 | |||
451 | corelock_unlock(&blsplay->cl); | ||
452 | } | ||
453 | } | ||
454 | else | ||
455 | { | ||
456 | if (TRY_LOCK_THREAD(t)) | ||
457 | { | ||
458 | if (bl->thread == t) | ||
459 | return t; | ||
460 | |||
461 | UNLOCK_THREAD(t); | ||
462 | } | ||
463 | } | ||
464 | } | ||
465 | } | ||
466 | #endif /* NUM_CORES */ | ||
467 | |||
468 | static inline void unlock_blocker_thread(struct blocker *bl) | ||
469 | { | ||
470 | #if NUM_CORES > 1 | ||
471 | struct thread_entry *blt = bl->thread; | ||
472 | if (blt) | ||
473 | UNLOCK_THREAD(blt); | ||
474 | else | ||
475 | corelock_unlock(&((struct blocker_splay *)bl)->cl); | ||
476 | #endif /* NUM_CORES > 1*/ | ||
477 | (void)bl; | ||
478 | } | ||
479 | |||
480 | /*--------------------------------------------------------------------------- | ||
599 | * Change the priority and rtr entry for a running thread | 481 | * Change the priority and rtr entry for a running thread |
600 | *--------------------------------------------------------------------------- | 482 | *--------------------------------------------------------------------------- |
601 | */ | 483 | */ |
602 | static inline void set_running_thread_priority( | 484 | static inline void set_rtr_thread_priority( |
603 | struct thread_entry *thread, int priority) | 485 | struct thread_entry *thread, int priority) |
604 | { | 486 | { |
605 | const unsigned int core = IF_COP_CORE(thread->core); | 487 | const unsigned int core = IF_COP_CORE(thread->core); |
606 | RTR_LOCK(core); | 488 | struct core_entry *corep = __core_id_entry(core); |
607 | rtr_move_entry(core, thread->priority, priority); | 489 | RTR_LOCK(corep); |
490 | rtr_move_entry(corep, thread->priority, priority); | ||
608 | thread->priority = priority; | 491 | thread->priority = priority; |
609 | RTR_UNLOCK(core); | 492 | RTR_UNLOCK(corep); |
610 | } | 493 | } |
611 | 494 | ||
612 | /*--------------------------------------------------------------------------- | 495 | /*--------------------------------------------------------------------------- |
@@ -619,30 +502,21 @@ static inline void set_running_thread_priority( | |||
619 | * penalty under high contention. | 502 | * penalty under high contention. |
620 | *--------------------------------------------------------------------------- | 503 | *--------------------------------------------------------------------------- |
621 | */ | 504 | */ |
622 | static int find_highest_priority_in_list_l( | 505 | static int wait_queue_find_priority(struct __wait_queue *wqp) |
623 | struct thread_entry * const thread) | ||
624 | { | 506 | { |
625 | if (LIKELY(thread != NULL)) | 507 | int highest_priority = PRIORITY_IDLE; |
626 | { | 508 | struct thread_entry *thread = WQ_THREAD_FIRST(wqp); |
627 | /* Go though list until the ending up at the initial thread */ | ||
628 | int highest_priority = thread->priority; | ||
629 | struct thread_entry *curr = thread; | ||
630 | 509 | ||
631 | do | 510 | while (thread != NULL) |
632 | { | 511 | { |
633 | int priority = curr->priority; | 512 | int priority = thread->priority; |
634 | 513 | if (priority < highest_priority) | |
635 | if (priority < highest_priority) | 514 | highest_priority = priority; |
636 | highest_priority = priority; | ||
637 | |||
638 | curr = curr->l.next; | ||
639 | } | ||
640 | while (curr != thread); | ||
641 | 515 | ||
642 | return highest_priority; | 516 | thread = WQ_THREAD_NEXT(thread); |
643 | } | 517 | } |
644 | 518 | ||
645 | return PRIORITY_IDLE; | 519 | return highest_priority; |
646 | } | 520 | } |
647 | 521 | ||
648 | /*--------------------------------------------------------------------------- | 522 | /*--------------------------------------------------------------------------- |
@@ -666,7 +540,7 @@ static void inherit_priority( | |||
666 | { | 540 | { |
667 | /* Multiple owners */ | 541 | /* Multiple owners */ |
668 | struct blocker_splay *blsplay = (struct blocker_splay *)bl; | 542 | struct blocker_splay *blsplay = (struct blocker_splay *)bl; |
669 | 543 | ||
670 | /* Recurse down the all the branches of this; it's the only way. | 544 | /* Recurse down the all the branches of this; it's the only way. |
671 | We might meet the same queue several times if more than one of | 545 | We might meet the same queue several times if more than one of |
672 | these threads is waiting the same queue. That isn't a problem | 546 | these threads is waiting the same queue. That isn't a problem |
@@ -674,7 +548,7 @@ static void inherit_priority( | |||
674 | FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum) | 548 | FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum) |
675 | { | 549 | { |
676 | bl->priority = oldblpr; /* To see the change each time */ | 550 | bl->priority = oldblpr; /* To see the change each time */ |
677 | blt = &threads[slotnum]; | 551 | blt = __thread_slot_entry(slotnum); |
678 | LOCK_THREAD(blt); | 552 | LOCK_THREAD(blt); |
679 | inherit_priority(blocker0, bl, blt, newblpr); | 553 | inherit_priority(blocker0, bl, blt, newblpr); |
680 | } | 554 | } |
@@ -699,7 +573,7 @@ static void inherit_priority( | |||
699 | 573 | ||
700 | if (blt->state == STATE_RUNNING) | 574 | if (blt->state == STATE_RUNNING) |
701 | { | 575 | { |
702 | set_running_thread_priority(blt, newpr); | 576 | set_rtr_thread_priority(blt, newpr); |
703 | break; /* Running: last in chain */ | 577 | break; /* Running: last in chain */ |
704 | } | 578 | } |
705 | 579 | ||
@@ -714,7 +588,7 @@ static void inherit_priority( | |||
714 | break; /* Full circle - deadlock! */ | 588 | break; /* Full circle - deadlock! */ |
715 | 589 | ||
716 | /* Blocker becomes current thread and the process repeats */ | 590 | /* Blocker becomes current thread and the process repeats */ |
717 | struct thread_entry **bqp = blt->bqp; | 591 | struct __wait_queue *wqp = wait_queue_ptr(blt); |
718 | struct thread_entry *t = blt; | 592 | struct thread_entry *t = blt; |
719 | blt = lock_blocker_thread(bl); | 593 | blt = lock_blocker_thread(bl); |
720 | 594 | ||
@@ -725,7 +599,7 @@ static void inherit_priority( | |||
725 | if (newpr <= oldblpr) | 599 | if (newpr <= oldblpr) |
726 | newblpr = newpr; | 600 | newblpr = newpr; |
727 | else if (oldpr <= oldblpr) | 601 | else if (oldpr <= oldblpr) |
728 | newblpr = find_highest_priority_in_list_l(*bqp); | 602 | newblpr = wait_queue_find_priority(wqp); |
729 | 603 | ||
730 | if (newblpr == oldblpr) | 604 | if (newblpr == oldblpr) |
731 | break; /* Queue priority not changing */ | 605 | break; /* Queue priority not changing */ |
@@ -735,22 +609,46 @@ static void inherit_priority( | |||
735 | } | 609 | } |
736 | 610 | ||
737 | /*--------------------------------------------------------------------------- | 611 | /*--------------------------------------------------------------------------- |
738 | * Quick-disinherit of priority elevation. 'thread' must be a running thread. | 612 | * Quick-inherit of priority elevation. 'thread' must not be runnable |
739 | *--------------------------------------------------------------------------- | 613 | *--------------------------------------------------------------------------- |
740 | */ | 614 | */ |
741 | static void priority_disinherit_internal(struct thread_entry *thread, | 615 | static void priority_inherit_internal_inner(struct thread_entry *thread, |
742 | int blpr) | 616 | int blpr) |
617 | { | ||
618 | if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < thread->priority) | ||
619 | thread->priority = blpr; | ||
620 | } | ||
621 | |||
622 | static inline void priority_inherit_internal(struct thread_entry *thread, | ||
623 | int blpr) | ||
743 | { | 624 | { |
744 | if (blpr < PRIORITY_IDLE && | 625 | if (blpr < PRIORITY_IDLE) |
745 | prio_subtract_entry(&thread->pdist, blpr) == 0 && | 626 | priority_inherit_internal_inner(thread, blpr); |
627 | } | ||
628 | |||
629 | /*--------------------------------------------------------------------------- | ||
630 | * Quick-disinherit of priority elevation. 'thread' must be current | ||
631 | *--------------------------------------------------------------------------- | ||
632 | */ | ||
633 | static void priority_disinherit_internal_inner(struct thread_entry *thread, | ||
634 | int blpr) | ||
635 | { | ||
636 | if (prio_subtract_entry(&thread->pdist, blpr) == 0 && | ||
746 | blpr <= thread->priority) | 637 | blpr <= thread->priority) |
747 | { | 638 | { |
748 | int priority = priobit_ffs(&thread->pdist.mask); | 639 | int priority = priobit_ffs(&thread->pdist.mask); |
749 | if (priority != thread->priority) | 640 | if (priority != thread->priority) |
750 | set_running_thread_priority(thread, priority); | 641 | set_rtr_thread_priority(thread, priority); |
751 | } | 642 | } |
752 | } | 643 | } |
753 | 644 | ||
645 | static inline void priority_disinherit_internal(struct thread_entry *thread, | ||
646 | int blpr) | ||
647 | { | ||
648 | if (blpr < PRIORITY_IDLE) | ||
649 | priority_disinherit_internal_inner(thread, blpr); | ||
650 | } | ||
651 | |||
754 | void priority_disinherit(struct thread_entry *thread, struct blocker *bl) | 652 | void priority_disinherit(struct thread_entry *thread, struct blocker *bl) |
755 | { | 653 | { |
756 | LOCK_THREAD(thread); | 654 | LOCK_THREAD(thread); |
@@ -767,30 +665,32 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread) | |||
767 | { | 665 | { |
768 | /* All threads will have the same blocker and queue; only we are changing | 666 | /* All threads will have the same blocker and queue; only we are changing |
769 | it now */ | 667 | it now */ |
770 | struct thread_entry **bqp = thread->bqp; | 668 | struct __wait_queue *wqp = wait_queue_ptr(thread); |
771 | struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker; | 669 | struct blocker *bl = thread->blocker; |
772 | struct thread_entry *blt = blsplay->blocker.thread; | 670 | struct blocker_splay *blsplay = (struct blocker_splay *)bl; |
671 | struct thread_entry *blt = bl->thread; | ||
773 | 672 | ||
774 | /* The first thread is already locked and is assumed tagged "multi" */ | 673 | /* The first thread is already locked and is assumed tagged "multi" */ |
775 | int count = 1; | 674 | int count = 1; |
776 | struct thread_entry *temp_queue = NULL; | ||
777 | 675 | ||
778 | /* 'thread' is locked on entry */ | 676 | /* Multiple versions of the wait queue may be seen if doing more than |
677 | one thread; queue removal isn't destructive to the pointers of the node | ||
678 | being removed; this may lead to the blocker priority being wrong for a | ||
679 | time but it gets fixed up below after getting exclusive access to the | ||
680 | queue */ | ||
779 | while (1) | 681 | while (1) |
780 | { | 682 | { |
781 | LOCK_THREAD(blt); | ||
782 | |||
783 | remove_from_list_l(bqp, thread); | ||
784 | thread->blocker = NULL; | 683 | thread->blocker = NULL; |
684 | wait_queue_remove(thread); | ||
785 | 685 | ||
786 | struct thread_entry *tnext = *bqp; | 686 | unsigned int slotnum = THREAD_ID_SLOT(thread->id); |
687 | threadbit_set_bit(&blsplay->mask, slotnum); | ||
688 | |||
689 | struct thread_entry *tnext = WQ_THREAD_NEXT(thread); | ||
787 | if (tnext == NULL || tnext->retval == 0) | 690 | if (tnext == NULL || tnext->retval == 0) |
788 | break; | 691 | break; |
789 | 692 | ||
790 | add_to_list_l(&temp_queue, thread); | ||
791 | |||
792 | UNLOCK_THREAD(thread); | 693 | UNLOCK_THREAD(thread); |
793 | UNLOCK_THREAD(blt); | ||
794 | 694 | ||
795 | count++; | 695 | count++; |
796 | thread = tnext; | 696 | thread = tnext; |
@@ -798,65 +698,51 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread) | |||
798 | LOCK_THREAD(thread); | 698 | LOCK_THREAD(thread); |
799 | } | 699 | } |
800 | 700 | ||
801 | int blpr = blsplay->blocker.priority; | ||
802 | priority_disinherit_internal(blt, blpr); | ||
803 | |||
804 | /* Locking order reverses here since the threads are no longer on the | 701 | /* Locking order reverses here since the threads are no longer on the |
805 | queue side */ | 702 | queued side */ |
806 | if (count > 1) | 703 | if (count > 1) |
807 | { | ||
808 | add_to_list_l(&temp_queue, thread); | ||
809 | UNLOCK_THREAD(thread); | ||
810 | corelock_lock(&blsplay->cl); | 704 | corelock_lock(&blsplay->cl); |
811 | 705 | ||
812 | blpr = find_highest_priority_in_list_l(*bqp); | 706 | LOCK_THREAD(blt); |
707 | |||
708 | int blpr = bl->priority; | ||
709 | priority_disinherit_internal(blt, blpr); | ||
710 | |||
711 | if (count > 1) | ||
712 | { | ||
813 | blsplay->blocker.thread = NULL; | 713 | blsplay->blocker.thread = NULL; |
814 | 714 | ||
815 | thread = temp_queue; | 715 | blpr = wait_queue_find_priority(wqp); |
816 | LOCK_THREAD(thread); | 716 | |
717 | FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum) | ||
718 | { | ||
719 | UNLOCK_THREAD(thread); | ||
720 | thread = __thread_slot_entry(slotnum); | ||
721 | LOCK_THREAD(thread); | ||
722 | priority_inherit_internal(thread, blpr); | ||
723 | core_schedule_wakeup(thread); | ||
724 | } | ||
817 | } | 725 | } |
818 | else | 726 | else |
819 | { | 727 | { |
820 | /* Becomes a simple, direct transfer */ | 728 | /* Becomes a simple, direct transfer */ |
821 | if (thread->priority <= blpr) | ||
822 | blpr = find_highest_priority_in_list_l(*bqp); | ||
823 | blsplay->blocker.thread = thread; | 729 | blsplay->blocker.thread = thread; |
824 | } | ||
825 | |||
826 | blsplay->blocker.priority = blpr; | ||
827 | 730 | ||
828 | while (1) | 731 | if (thread->priority <= blpr) |
829 | { | 732 | blpr = wait_queue_find_priority(wqp); |
830 | unsigned int slotnum = THREAD_ID_SLOT(thread->id); | ||
831 | threadbit_set_bit(&blsplay->mask, slotnum); | ||
832 | |||
833 | if (blpr < PRIORITY_IDLE) | ||
834 | { | ||
835 | prio_add_entry(&thread->pdist, blpr); | ||
836 | if (blpr < thread->priority) | ||
837 | thread->priority = blpr; | ||
838 | } | ||
839 | |||
840 | if (count > 1) | ||
841 | remove_from_list_l(&temp_queue, thread); | ||
842 | 733 | ||
734 | priority_inherit_internal(thread, blpr); | ||
843 | core_schedule_wakeup(thread); | 735 | core_schedule_wakeup(thread); |
736 | } | ||
844 | 737 | ||
845 | UNLOCK_THREAD(thread); | 738 | UNLOCK_THREAD(thread); |
846 | |||
847 | thread = temp_queue; | ||
848 | if (thread == NULL) | ||
849 | break; | ||
850 | 739 | ||
851 | LOCK_THREAD(thread); | 740 | bl->priority = blpr; |
852 | } | ||
853 | 741 | ||
854 | UNLOCK_THREAD(blt); | 742 | UNLOCK_THREAD(blt); |
855 | 743 | ||
856 | if (count > 1) | 744 | if (count > 1) |
857 | { | ||
858 | corelock_unlock(&blsplay->cl); | 745 | corelock_unlock(&blsplay->cl); |
859 | } | ||
860 | 746 | ||
861 | blt->retval = count; | 747 | blt->retval = count; |
862 | } | 748 | } |
@@ -876,29 +762,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread) | |||
876 | struct blocker *bl = thread->blocker; | 762 | struct blocker *bl = thread->blocker; |
877 | struct thread_entry *blt = bl->thread; | 763 | struct thread_entry *blt = bl->thread; |
878 | 764 | ||
879 | THREAD_ASSERT(cores[CURRENT_CORE].running == blt, | 765 | THREAD_ASSERT(__running_self_entry() == blt, |
880 | "UPPT->wrong thread", cores[CURRENT_CORE].running); | 766 | "UPPT->wrong thread", __running_self_entry()); |
881 | 767 | ||
882 | LOCK_THREAD(blt); | 768 | LOCK_THREAD(blt); |
883 | 769 | ||
884 | struct thread_entry **bqp = thread->bqp; | ||
885 | remove_from_list_l(bqp, thread); | ||
886 | thread->blocker = NULL; | 770 | thread->blocker = NULL; |
771 | struct __wait_queue *wqp = wait_queue_remove(thread); | ||
887 | 772 | ||
888 | int blpr = bl->priority; | 773 | int blpr = bl->priority; |
889 | 774 | ||
890 | /* Remove the object's boost from the owning thread */ | 775 | /* Remove the object's boost from the owning thread */ |
891 | if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority) | 776 | priority_disinherit_internal_inner(blt, blpr); |
892 | { | ||
893 | /* No more threads at this priority are waiting and the old level is | ||
894 | * at least the thread level */ | ||
895 | int priority = priobit_ffs(&blt->pdist.mask); | ||
896 | if (priority != blt->priority) | ||
897 | set_running_thread_priority(blt, priority); | ||
898 | } | ||
899 | |||
900 | struct thread_entry *tnext = *bqp; | ||
901 | 777 | ||
778 | struct thread_entry *tnext = WQ_THREAD_FIRST(wqp); | ||
902 | if (LIKELY(tnext == NULL)) | 779 | if (LIKELY(tnext == NULL)) |
903 | { | 780 | { |
904 | /* Expected shortcut - no more waiters */ | 781 | /* Expected shortcut - no more waiters */ |
@@ -906,20 +783,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread) | |||
906 | } | 783 | } |
907 | else | 784 | else |
908 | { | 785 | { |
909 | /* If lowering, we need to scan threads remaining in queue */ | 786 | /* If thread is at the blocker priority, its removal may drop it */ |
910 | int priority = thread->priority; | 787 | if (thread->priority <= blpr) |
911 | if (priority <= blpr) | 788 | blpr = wait_queue_find_priority(wqp); |
912 | blpr = find_highest_priority_in_list_l(tnext); | ||
913 | 789 | ||
914 | if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority) | 790 | priority_inherit_internal_inner(thread, blpr); |
915 | thread->priority = blpr; /* Raise new owner */ | ||
916 | } | 791 | } |
917 | 792 | ||
793 | bl->thread = thread; /* This thread pwns */ | ||
794 | |||
918 | core_schedule_wakeup(thread); | 795 | core_schedule_wakeup(thread); |
919 | UNLOCK_THREAD(thread); | 796 | UNLOCK_THREAD(thread); |
920 | 797 | ||
921 | bl->thread = thread; /* This thread pwns */ | 798 | bl->priority = blpr; /* Save highest blocked priority */ |
922 | bl->priority = blpr; /* Save highest blocked priority */ | 799 | |
923 | UNLOCK_THREAD(blt); | 800 | UNLOCK_THREAD(blt); |
924 | } | 801 | } |
925 | 802 | ||
@@ -933,9 +810,9 @@ static void wakeup_thread_release(struct thread_entry *thread) | |||
933 | { | 810 | { |
934 | struct blocker *bl = thread->blocker; | 811 | struct blocker *bl = thread->blocker; |
935 | struct thread_entry *blt = lock_blocker_thread(bl); | 812 | struct thread_entry *blt = lock_blocker_thread(bl); |
936 | struct thread_entry **bqp = thread->bqp; | 813 | |
937 | remove_from_list_l(bqp, thread); | ||
938 | thread->blocker = NULL; | 814 | thread->blocker = NULL; |
815 | struct __wait_queue *wqp = wait_queue_remove(thread); | ||
939 | 816 | ||
940 | /* Off to see the wizard... */ | 817 | /* Off to see the wizard... */ |
941 | core_schedule_wakeup(thread); | 818 | core_schedule_wakeup(thread); |
@@ -950,7 +827,7 @@ static void wakeup_thread_release(struct thread_entry *thread) | |||
950 | 827 | ||
951 | UNLOCK_THREAD(thread); | 828 | UNLOCK_THREAD(thread); |
952 | 829 | ||
953 | int newblpr = find_highest_priority_in_list_l(*bqp); | 830 | int newblpr = wait_queue_find_priority(wqp); |
954 | if (newblpr == bl->priority) | 831 | if (newblpr == bl->priority) |
955 | { | 832 | { |
956 | /* Blocker priority won't change */ | 833 | /* Blocker priority won't change */ |
@@ -963,25 +840,17 @@ static void wakeup_thread_release(struct thread_entry *thread) | |||
963 | 840 | ||
964 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 841 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
965 | 842 | ||
843 | |||
966 | /*--------------------------------------------------------------------------- | 844 | /*--------------------------------------------------------------------------- |
967 | * Explicitly wakeup a thread on a blocking queue. Only affects threads of | 845 | * Explicitly wakeup a thread on a blocking queue. Only affects threads of |
968 | * STATE_BLOCKED and STATE_BLOCKED_W_TMO. | 846 | * STATE_BLOCKED and STATE_BLOCKED_W_TMO. |
969 | * | 847 | * |
970 | * This code should be considered a critical section by the caller meaning | 848 | * INTERNAL: Intended for use by kernel and not programs. |
971 | * that the object's corelock should be held. | ||
972 | * | ||
973 | * INTERNAL: Intended for use by kernel objects and not for programs. | ||
974 | *--------------------------------------------------------------------------- | 849 | *--------------------------------------------------------------------------- |
975 | */ | 850 | */ |
976 | unsigned int wakeup_thread_(struct thread_entry **list | 851 | unsigned int wakeup_thread_(struct thread_entry *thread |
977 | IF_PRIO(, enum wakeup_thread_protocol proto)) | 852 | IF_PRIO(, enum wakeup_thread_protocol proto)) |
978 | { | 853 | { |
979 | struct thread_entry *thread = *list; | ||
980 | |||
981 | /* Check if there is a blocked thread at all. */ | ||
982 | if (*list == NULL) | ||
983 | return THREAD_NONE; | ||
984 | |||
985 | LOCK_THREAD(thread); | 854 | LOCK_THREAD(thread); |
986 | 855 | ||
987 | /* Determine thread's current state. */ | 856 | /* Determine thread's current state. */ |
@@ -1008,24 +877,21 @@ unsigned int wakeup_thread_(struct thread_entry **list | |||
1008 | else | 877 | else |
1009 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 878 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
1010 | { | 879 | { |
1011 | /* No PIP - just boost the thread by aging */ | 880 | wait_queue_remove(thread); |
1012 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1013 | thread->skip_count = thread->priority; | ||
1014 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
1015 | remove_from_list_l(list, thread); | ||
1016 | core_schedule_wakeup(thread); | 881 | core_schedule_wakeup(thread); |
1017 | UNLOCK_THREAD(thread); | 882 | UNLOCK_THREAD(thread); |
1018 | } | 883 | } |
1019 | 884 | ||
1020 | return should_switch_tasks(); | 885 | return should_switch_tasks(thread); |
1021 | 886 | ||
1022 | /* Nothing to do. State is not blocked. */ | ||
1023 | default: | ||
1024 | #if THREAD_EXTRA_CHECKS | ||
1025 | THREAD_PANICF("wakeup_thread->block invalid", thread); | ||
1026 | case STATE_RUNNING: | 887 | case STATE_RUNNING: |
1027 | case STATE_KILLED: | 888 | if (wait_queue_try_remove(thread)) |
1028 | #endif | 889 | { |
890 | UNLOCK_THREAD(thread); | ||
891 | return THREAD_OK; /* timed out */ | ||
892 | } | ||
893 | |||
894 | default: | ||
1029 | UNLOCK_THREAD(thread); | 895 | UNLOCK_THREAD(thread); |
1030 | return THREAD_NONE; | 896 | return THREAD_NONE; |
1031 | } | 897 | } |
@@ -1037,201 +903,102 @@ unsigned int wakeup_thread_(struct thread_entry **list | |||
1037 | * tick when the next check will occur. | 903 | * tick when the next check will occur. |
1038 | *--------------------------------------------------------------------------- | 904 | *--------------------------------------------------------------------------- |
1039 | */ | 905 | */ |
1040 | void check_tmo_threads(void) | 906 | static NO_INLINE void check_tmo_expired_inner(struct core_entry *corep) |
1041 | { | 907 | { |
1042 | const unsigned int core = CURRENT_CORE; | ||
1043 | const long tick = current_tick; /* snapshot the current tick */ | 908 | const long tick = current_tick; /* snapshot the current tick */ |
1044 | long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */ | 909 | long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */ |
1045 | struct thread_entry *next = cores[core].timeout; | 910 | struct thread_entry *prev = NULL; |
911 | struct thread_entry *thread = TMO_THREAD_FIRST(&corep->tmo); | ||
1046 | 912 | ||
1047 | /* If there are no processes waiting for a timeout, just keep the check | 913 | /* If there are no processes waiting for a timeout, just keep the check |
1048 | tick from falling into the past. */ | 914 | tick from falling into the past. */ |
1049 | 915 | ||
1050 | /* Break the loop once we have walked through the list of all | 916 | /* Break the loop once we have walked through the list of all |
1051 | * sleeping processes or have removed them all. */ | 917 | * sleeping processes or have removed them all. */ |
1052 | while (next != NULL) | 918 | while (thread != NULL) |
1053 | { | 919 | { |
1054 | /* Check sleeping threads. Allow interrupts between checks. */ | 920 | /* Check sleeping threads. Allow interrupts between checks. */ |
1055 | enable_irq(); | 921 | enable_irq(); |
1056 | 922 | ||
1057 | struct thread_entry *curr = next; | 923 | struct thread_entry *next = TMO_THREAD_NEXT(thread); |
1058 | |||
1059 | next = curr->tmo.next; | ||
1060 | 924 | ||
1061 | /* Lock thread slot against explicit wakeup */ | 925 | /* Lock thread slot against explicit wakeup */ |
1062 | disable_irq(); | 926 | disable_irq(); |
1063 | LOCK_THREAD(curr); | 927 | LOCK_THREAD(thread); |
1064 | 928 | ||
1065 | unsigned state = curr->state; | 929 | unsigned int state = thread->state; |
1066 | 930 | ||
1067 | if (state < TIMEOUT_STATE_FIRST) | 931 | if (LIKELY(state >= TIMEOUT_STATE_FIRST && |
1068 | { | 932 | TIME_BEFORE(tick, thread->tmo_tick))) |
1069 | /* Cleanup threads no longer on a timeout but still on the | ||
1070 | * list. */ | ||
1071 | remove_from_list_tmo(curr); | ||
1072 | } | ||
1073 | else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick))) | ||
1074 | { | 933 | { |
1075 | /* Timeout still pending - this will be the usual case */ | 934 | /* Timeout still pending - this will be the usual case */ |
1076 | if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) | 935 | if (TIME_BEFORE(thread->tmo_tick, next_tmo_check)) |
1077 | { | 936 | { |
1078 | /* Earliest timeout found so far - move the next check up | 937 | /* Move the next check up to its time */ |
1079 | to its time */ | 938 | next_tmo_check = thread->tmo_tick; |
1080 | next_tmo_check = curr->tmo_tick; | ||
1081 | } | 939 | } |
940 | |||
941 | prev = thread; | ||
1082 | } | 942 | } |
1083 | else | 943 | else |
1084 | { | 944 | { |
1085 | /* Sleep timeout has been reached so bring the thread back to | 945 | /* TODO: there are no priority-inheriting timeout blocks |
1086 | * life again. */ | 946 | right now but the procedure should be established */ |
1087 | if (state == STATE_BLOCKED_W_TMO) | ||
1088 | { | ||
1089 | #ifdef HAVE_CORELOCK_OBJECT | ||
1090 | /* Lock the waiting thread's kernel object */ | ||
1091 | struct corelock *ocl = curr->obj_cl; | ||
1092 | |||
1093 | if (UNLIKELY(corelock_try_lock(ocl) == 0)) | ||
1094 | { | ||
1095 | /* Need to retry in the correct order though the need is | ||
1096 | * unlikely */ | ||
1097 | UNLOCK_THREAD(curr); | ||
1098 | corelock_lock(ocl); | ||
1099 | LOCK_THREAD(curr); | ||
1100 | |||
1101 | if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO)) | ||
1102 | { | ||
1103 | /* Thread was woken or removed explicitly while slot | ||
1104 | * was unlocked */ | ||
1105 | corelock_unlock(ocl); | ||
1106 | remove_from_list_tmo(curr); | ||
1107 | UNLOCK_THREAD(curr); | ||
1108 | continue; | ||
1109 | } | ||
1110 | } | ||
1111 | #endif /* NUM_CORES */ | ||
1112 | |||
1113 | #ifdef HAVE_WAKEUP_EXT_CB | ||
1114 | if (curr->wakeup_ext_cb != NULL) | ||
1115 | curr->wakeup_ext_cb(curr); | ||
1116 | #endif | ||
1117 | |||
1118 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1119 | if (curr->blocker != NULL) | ||
1120 | wakeup_thread_release(curr); | ||
1121 | else | ||
1122 | #endif | ||
1123 | remove_from_list_l(curr->bqp, curr); | ||
1124 | |||
1125 | corelock_unlock(ocl); | ||
1126 | } | ||
1127 | /* else state == STATE_SLEEPING */ | ||
1128 | 947 | ||
1129 | remove_from_list_tmo(curr); | 948 | /* Sleep timeout has been reached / garbage collect stale list |
949 | items */ | ||
950 | tmo_queue_expire(&corep->tmo, prev, thread); | ||
1130 | 951 | ||
1131 | RTR_LOCK(core); | 952 | if (state >= TIMEOUT_STATE_FIRST) |
953 | core_rtr_add(corep, thread); | ||
1132 | 954 | ||
1133 | curr->state = STATE_RUNNING; | 955 | /* removed this one - prev doesn't change */ |
1134 | |||
1135 | add_to_list_l(&cores[core].running, curr); | ||
1136 | rtr_add_entry(core, curr->priority); | ||
1137 | |||
1138 | RTR_UNLOCK(core); | ||
1139 | } | 956 | } |
1140 | 957 | ||
1141 | UNLOCK_THREAD(curr); | 958 | UNLOCK_THREAD(thread); |
1142 | } | ||
1143 | |||
1144 | cores[core].next_tmo_check = next_tmo_check; | ||
1145 | } | ||
1146 | |||
1147 | /*--------------------------------------------------------------------------- | ||
1148 | * Performs operations that must be done before blocking a thread but after | ||
1149 | * the state is saved. | ||
1150 | *--------------------------------------------------------------------------- | ||
1151 | */ | ||
1152 | #if NUM_CORES > 1 | ||
1153 | static inline void run_blocking_ops( | ||
1154 | unsigned int core, struct thread_entry *thread) | ||
1155 | { | ||
1156 | struct thread_blk_ops *ops = &cores[core].blk_ops; | ||
1157 | const unsigned flags = ops->flags; | ||
1158 | |||
1159 | if (LIKELY(flags == TBOP_CLEAR)) | ||
1160 | return; | ||
1161 | 959 | ||
1162 | switch (flags) | 960 | thread = next; |
1163 | { | ||
1164 | case TBOP_SWITCH_CORE: | ||
1165 | core_switch_blk_op(core, thread); | ||
1166 | /* Fall-through */ | ||
1167 | case TBOP_UNLOCK_CORELOCK: | ||
1168 | corelock_unlock(ops->cl_p); | ||
1169 | break; | ||
1170 | } | 961 | } |
1171 | 962 | ||
1172 | ops->flags = TBOP_CLEAR; | 963 | corep->next_tmo_check = next_tmo_check; |
1173 | } | 964 | } |
1174 | #endif /* NUM_CORES > 1 */ | ||
1175 | 965 | ||
1176 | #ifdef RB_PROFILE | 966 | static FORCE_INLINE void check_tmo_expired(struct core_entry *corep) |
1177 | void profile_thread(void) | ||
1178 | { | 967 | { |
1179 | profstart(cores[CURRENT_CORE].running - threads); | 968 | if (!TIME_BEFORE(current_tick, corep->next_tmo_check)) |
969 | check_tmo_expired_inner(corep); | ||
1180 | } | 970 | } |
1181 | #endif | ||
1182 | 971 | ||
1183 | /*--------------------------------------------------------------------------- | 972 | /*--------------------------------------------------------------------------- |
1184 | * Prepares a thread to block on an object's list and/or for a specified | 973 | * Prepares the current thread to sleep forever or for the given duration. |
1185 | * duration - expects object and slot to be appropriately locked if needed | ||
1186 | * and interrupts to be masked. | ||
1187 | *--------------------------------------------------------------------------- | 974 | *--------------------------------------------------------------------------- |
1188 | */ | 975 | */ |
1189 | static inline void block_thread_on_l(struct thread_entry *thread, | 976 | static FORCE_INLINE void prepare_block(struct thread_entry *current, |
1190 | unsigned state) | 977 | unsigned int state, int timeout) |
1191 | { | 978 | { |
1192 | /* If inlined, unreachable branches will be pruned with no size penalty | 979 | const unsigned int core = IF_COP_CORE(current->core); |
1193 | because state is passed as a constant parameter. */ | ||
1194 | const unsigned int core = IF_COP_CORE(thread->core); | ||
1195 | 980 | ||
1196 | /* Remove the thread from the list of running threads. */ | 981 | /* Remove the thread from the list of running threads. */ |
1197 | RTR_LOCK(core); | 982 | struct core_entry *corep = __core_id_entry(core); |
1198 | remove_from_list_l(&cores[core].running, thread); | 983 | core_rtr_remove(corep, current); |
1199 | rtr_subtract_entry(core, thread->priority); | ||
1200 | RTR_UNLOCK(core); | ||
1201 | 984 | ||
1202 | /* Add a timeout to the block if not infinite */ | 985 | if (timeout >= 0) |
1203 | switch (state) | ||
1204 | { | 986 | { |
1205 | case STATE_BLOCKED: | 987 | /* Sleep may expire. */ |
1206 | case STATE_BLOCKED_W_TMO: | 988 | long tmo_tick = current_tick + timeout; |
1207 | /* Put the thread into a new list of inactive threads. */ | 989 | current->tmo_tick = tmo_tick; |
1208 | add_to_list_l(thread->bqp, thread); | ||
1209 | 990 | ||
1210 | if (state == STATE_BLOCKED) | 991 | if (TIME_BEFORE(tmo_tick, corep->next_tmo_check)) |
1211 | break; | 992 | corep->next_tmo_check = tmo_tick; |
1212 | 993 | ||
1213 | /* Fall-through */ | 994 | tmo_queue_register(&corep->tmo, current); |
1214 | case STATE_SLEEPING: | ||
1215 | /* If this thread times out sooner than any other thread, update | ||
1216 | next_tmo_check to its timeout */ | ||
1217 | if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check)) | ||
1218 | { | ||
1219 | cores[core].next_tmo_check = thread->tmo_tick; | ||
1220 | } | ||
1221 | 995 | ||
1222 | if (thread->tmo.prev == NULL) | 996 | if (state == STATE_BLOCKED) |
1223 | { | 997 | state = STATE_BLOCKED_W_TMO; |
1224 | add_to_list_tmo(thread); | ||
1225 | } | ||
1226 | /* else thread was never removed from list - just keep it there */ | ||
1227 | break; | ||
1228 | } | 998 | } |
1229 | 999 | ||
1230 | /* Remember the next thread about to block. */ | ||
1231 | cores[core].block_task = thread; | ||
1232 | |||
1233 | /* Report new state. */ | 1000 | /* Report new state. */ |
1234 | thread->state = state; | 1001 | current->state = state; |
1235 | } | 1002 | } |
1236 | 1003 | ||
1237 | /*--------------------------------------------------------------------------- | 1004 | /*--------------------------------------------------------------------------- |
@@ -1239,178 +1006,120 @@ static inline void block_thread_on_l(struct thread_entry *thread, | |||
1239 | * that removed itself from the running list first must specify itself in | 1006 | * that removed itself from the running list first must specify itself in |
1240 | * the parameter. | 1007 | * the parameter. |
1241 | * | 1008 | * |
1242 | * INTERNAL: Intended for use by kernel and not for programs. | 1009 | * INTERNAL: Intended for use by kernel and not programs. |
1243 | *--------------------------------------------------------------------------- | 1010 | *--------------------------------------------------------------------------- |
1244 | */ | 1011 | */ |
1245 | void switch_thread(void) | 1012 | void switch_thread(void) |
1246 | { | 1013 | { |
1247 | |||
1248 | const unsigned int core = CURRENT_CORE; | 1014 | const unsigned int core = CURRENT_CORE; |
1249 | struct thread_entry *block = cores[core].block_task; | 1015 | struct core_entry *corep = __core_id_entry(core); |
1250 | struct thread_entry *thread = cores[core].running; | 1016 | struct thread_entry *thread = corep->running; |
1251 | 1017 | ||
1252 | /* Get context to save - next thread to run is unknown until all wakeups | 1018 | if (thread) |
1253 | * are evaluated */ | ||
1254 | if (block != NULL) | ||
1255 | { | 1019 | { |
1256 | cores[core].block_task = NULL; | ||
1257 | |||
1258 | #if NUM_CORES > 1 | ||
1259 | if (UNLIKELY(thread == block)) | ||
1260 | { | ||
1261 | /* This was the last thread running and another core woke us before | ||
1262 | * reaching here. Force next thread selection to give tmo threads or | ||
1263 | * other threads woken before this block a first chance. */ | ||
1264 | block = NULL; | ||
1265 | } | ||
1266 | else | ||
1267 | #endif | ||
1268 | { | ||
1269 | /* Blocking task is the old one */ | ||
1270 | thread = block; | ||
1271 | } | ||
1272 | } | ||
1273 | |||
1274 | #ifdef RB_PROFILE | 1020 | #ifdef RB_PROFILE |
1275 | #ifdef CPU_COLDFIRE | 1021 | profile_thread_stopped(THREAD_ID_SLOT(thread->id)); |
1276 | _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK); | ||
1277 | #else | ||
1278 | profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK); | ||
1279 | #endif | 1022 | #endif |
1280 | #endif | ||
1281 | |||
1282 | /* Begin task switching by saving our current context so that we can | ||
1283 | * restore the state of the current thread later to the point prior | ||
1284 | * to this call. */ | ||
1285 | thread_store_context(thread); | ||
1286 | #ifdef DEBUG | 1023 | #ifdef DEBUG |
1287 | /* Check core_ctx buflib integrity */ | 1024 | /* Check core_ctx buflib integrity */ |
1288 | core_check_valid(); | 1025 | core_check_valid(); |
1289 | #endif | ||
1290 | |||
1291 | /* Check if the current thread stack is overflown */ | ||
1292 | if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0) | ||
1293 | thread_stkov(thread); | ||
1294 | |||
1295 | #if NUM_CORES > 1 | ||
1296 | /* Run any blocking operations requested before switching/sleeping */ | ||
1297 | run_blocking_ops(core, thread); | ||
1298 | #endif | 1026 | #endif |
1027 | thread_store_context(thread); | ||
1299 | 1028 | ||
1300 | #ifdef HAVE_PRIORITY_SCHEDULING | 1029 | /* Check if the current thread stack is overflown */ |
1301 | /* Reset the value of thread's skip count */ | 1030 | if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0) |
1302 | thread->skip_count = 0; | 1031 | thread_stkov(thread); |
1303 | #endif | 1032 | } |
1304 | 1033 | ||
1034 | /* TODO: make a real idle task */ | ||
1305 | for (;;) | 1035 | for (;;) |
1306 | { | 1036 | { |
1307 | /* If there are threads on a timeout and the earliest wakeup is due, | ||
1308 | * check the list and wake any threads that need to start running | ||
1309 | * again. */ | ||
1310 | if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check)) | ||
1311 | { | ||
1312 | check_tmo_threads(); | ||
1313 | } | ||
1314 | |||
1315 | disable_irq(); | 1037 | disable_irq(); |
1316 | RTR_LOCK(core); | ||
1317 | 1038 | ||
1318 | thread = cores[core].running; | 1039 | /* Check for expired timeouts */ |
1040 | check_tmo_expired(corep); | ||
1319 | 1041 | ||
1320 | if (UNLIKELY(thread == NULL)) | 1042 | RTR_LOCK(corep); |
1321 | { | ||
1322 | /* Enter sleep mode to reduce power usage - woken up on interrupt | ||
1323 | * or wakeup request from another core - expected to enable | ||
1324 | * interrupts. */ | ||
1325 | RTR_UNLOCK(core); | ||
1326 | core_sleep(IF_COP(core)); | ||
1327 | } | ||
1328 | else | ||
1329 | { | ||
1330 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1331 | /* Select the new task based on priorities and the last time a | ||
1332 | * process got CPU time relative to the highest priority runnable | ||
1333 | * task. */ | ||
1334 | int max = priobit_ffs(&cores[core].rtr.mask); | ||
1335 | 1043 | ||
1336 | if (block == NULL) | 1044 | if (!RTR_EMPTY(&corep->rtr)) |
1337 | { | 1045 | break; |
1338 | /* Not switching on a block, tentatively select next thread */ | ||
1339 | thread = thread->l.next; | ||
1340 | } | ||
1341 | 1046 | ||
1342 | for (;;) | 1047 | thread = NULL; |
1343 | { | 1048 | |
1344 | int priority = thread->priority; | 1049 | /* Enter sleep mode to reduce power usage */ |
1345 | int diff; | 1050 | RTR_UNLOCK(corep); |
1346 | 1051 | core_sleep(IF_COP(core)); | |
1347 | /* This ridiculously simple method of aging seems to work | 1052 | |
1348 | * suspiciously well. It does tend to reward CPU hogs (under | 1053 | /* Awakened by interrupt or other CPU */ |
1349 | * yielding) but that's generally not desirable at all. On | 1054 | } |
1350 | * the plus side, it, relatively to other threads, penalizes | 1055 | |
1351 | * excess yielding which is good if some high priority thread | 1056 | thread = (thread && thread->state == STATE_RUNNING) ? |
1352 | * is performing no useful work such as polling for a device | 1057 | RTR_THREAD_NEXT(thread) : RTR_THREAD_FIRST(&corep->rtr); |
1353 | * to be ready. Of course, aging is only employed when higher | 1058 | |
1354 | * and lower priority threads are runnable. The highest | 1059 | #ifdef HAVE_PRIORITY_SCHEDULING |
1355 | * priority runnable thread(s) are never skipped unless a | 1060 | /* Select the new task based on priorities and the last time a |
1356 | * lower-priority process has aged sufficiently. Priorities | 1061 | * process got CPU time relative to the highest priority runnable |
1357 | * of REALTIME class are run strictly according to priority | 1062 | * task. If priority is not a feature, then FCFS is used (above). */ |
1358 | * thus are not subject to switchout due to lower-priority | 1063 | int max = priobit_ffs(&corep->rtr_dist.mask); |
1359 | * processes aging; they must give up the processor by going | ||
1360 | * off the run list. */ | ||
1361 | if (LIKELY(priority <= max) || | ||
1362 | (priority > PRIORITY_REALTIME && | ||
1363 | (diff = priority - max, | ||
1364 | ++thread->skip_count > diff*diff))) | ||
1365 | { | ||
1366 | cores[core].running = thread; | ||
1367 | break; | ||
1368 | } | ||
1369 | |||
1370 | thread = thread->l.next; | ||
1371 | } | ||
1372 | #else | ||
1373 | /* Without priority use a simple FCFS algorithm */ | ||
1374 | if (block == NULL) | ||
1375 | { | ||
1376 | /* Not switching on a block, select next thread */ | ||
1377 | thread = thread->l.next; | ||
1378 | cores[core].running = thread; | ||
1379 | } | ||
1380 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
1381 | 1064 | ||
1382 | RTR_UNLOCK(core); | 1065 | for (;;) |
1383 | enable_irq(); | 1066 | { |
1067 | int priority = thread->priority; | ||
1068 | int diff; | ||
1069 | |||
1070 | /* This ridiculously simple method of aging seems to work | ||
1071 | * suspiciously well. It does tend to reward CPU hogs (under | ||
1072 | * yielding) but that's generally not desirable at all. On | ||
1073 | * the plus side, it, relatively to other threads, penalizes | ||
1074 | * excess yielding which is good if some high priority thread | ||
1075 | * is performing no useful work such as polling for a device | ||
1076 | * to be ready. Of course, aging is only employed when higher | ||
1077 | * and lower priority threads are runnable. The highest | ||
1078 | * priority runnable thread(s) are never skipped unless a | ||
1079 | * lower-priority process has aged sufficiently. Priorities | ||
1080 | * of REALTIME class are run strictly according to priority | ||
1081 | * thus are not subject to switchout due to lower-priority | ||
1082 | * processes aging; they must give up the processor by going | ||
1083 | * off the run list. */ | ||
1084 | if (LIKELY(priority <= max) || | ||
1085 | (priority > PRIORITY_REALTIME && | ||
1086 | (diff = priority - max, ++thread->skip_count > diff*diff))) | ||
1087 | { | ||
1384 | break; | 1088 | break; |
1385 | } | 1089 | } |
1090 | |||
1091 | thread = RTR_THREAD_NEXT(thread); | ||
1386 | } | 1092 | } |
1387 | 1093 | ||
1388 | /* And finally give control to the next thread. */ | 1094 | thread->skip_count = 0; /* Reset aging counter */ |
1095 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
1096 | |||
1097 | rtr_queue_make_first(&corep->rtr, thread); | ||
1098 | corep->running = thread; | ||
1099 | |||
1100 | RTR_UNLOCK(corep); | ||
1101 | enable_irq(); | ||
1102 | |||
1103 | /* And finally, give control to the next thread. */ | ||
1389 | thread_load_context(thread); | 1104 | thread_load_context(thread); |
1390 | 1105 | ||
1391 | #ifdef RB_PROFILE | 1106 | #ifdef RB_PROFILE |
1392 | profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); | 1107 | profile_thread_started(THREAD_ID_SLOT(thread->id)); |
1393 | #endif | 1108 | #endif |
1394 | |||
1395 | } | 1109 | } |
1396 | 1110 | ||
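A worked illustration of the aging rule in switch_thread() above (the numbers follow directly from the condition shown there; the helper name below is hypothetical, while PRIORITY_REALTIME and skip_count are the identifiers the patch itself uses, and lower numbers mean more urgent priorities): a thread sitting diff = priority - max levels below the best runnable priority is passed over until its skip_count exceeds diff squared, so one level down it is selected on its 2nd examination, two levels down on its 5th, three levels down on its 10th (diff squared plus one), while threads at or above PRIORITY_REALTIME are never aged in at all.

    #include <stdbool.h>

    /* Sketch of the selection test in isolation; not part of the patch. */
    static bool would_run_sketch(int priority, int max, int *skip_count)
    {
        if (priority <= max)
            return true;                    /* already the most urgent runnable */
        if (priority <= PRIORITY_REALTIME)
            return false;                   /* realtime class: strict priority only */
        int diff = priority - max;
        return ++*skip_count > diff * diff; /* quadratic penalty for lower priority */
    }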
1397 | /*--------------------------------------------------------------------------- | 1111 | /*--------------------------------------------------------------------------- |
1398 | * Sleeps a thread for at least a specified number of ticks with zero being | 1112 | * Sleeps a thread for at least a specified number of ticks with zero being |
1399 | * a wait until the next tick. | 1113 | * a wait until the next tick. |
1400 | * | 1114 | * |
1401 | * INTERNAL: Intended for use by kernel and not for programs. | 1115 | * INTERNAL: Intended for use by kernel and not programs. |
1402 | *--------------------------------------------------------------------------- | 1116 | *--------------------------------------------------------------------------- |
1403 | */ | 1117 | */ |
1404 | void sleep_thread(int ticks) | 1118 | void sleep_thread(int ticks) |
1405 | { | 1119 | { |
1406 | struct thread_entry *current = cores[CURRENT_CORE].running; | 1120 | struct thread_entry *current = __running_self_entry(); |
1407 | |||
1408 | LOCK_THREAD(current); | 1121 | LOCK_THREAD(current); |
1409 | 1122 | prepare_block(current, STATE_SLEEPING, MAX(ticks, 0) + 1); | |
1410 | /* Set our timeout, remove from run list and join timeout list. */ | ||
1411 | current->tmo_tick = current_tick + MAX(ticks, 0) + 1; | ||
1412 | block_thread_on_l(current, STATE_SLEEPING); | ||
1413 | |||
1414 | UNLOCK_THREAD(current); | 1123 | UNLOCK_THREAD(current); |
1415 | } | 1124 | } |
1416 | 1125 | ||
@@ -1418,131 +1127,42 @@ void sleep_thread(int ticks) | |||
1418 | * Block a thread on a blocking queue for explicit wakeup. If timeout is | 1127 | * Block a thread on a blocking queue for explicit wakeup. If timeout is |
1419 | * negative, the block is infinite. | 1128 | * negative, the block is infinite. |
1420 | * | 1129 | * |
1421 | * INTERNAL: Intended for use by kernel objects and not for programs. | 1130 | * INTERNAL: Intended for use by kernel and not programs. |
1422 | *--------------------------------------------------------------------------- | 1131 | *--------------------------------------------------------------------------- |
1423 | */ | 1132 | */ |
1424 | void block_thread(struct thread_entry *current, int timeout) | 1133 | void block_thread_(struct thread_entry *current, int timeout) |
1425 | { | 1134 | { |
1426 | LOCK_THREAD(current); | 1135 | LOCK_THREAD(current); |
1427 | 1136 | ||
1428 | struct blocker *bl = NULL; | ||
1429 | #ifdef HAVE_PRIORITY_SCHEDULING | 1137 | #ifdef HAVE_PRIORITY_SCHEDULING |
1430 | bl = current->blocker; | 1138 | struct blocker *bl = current->blocker; |
1431 | struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL; | 1139 | struct thread_entry *blt = NULL; |
1432 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 1140 | if (bl != NULL) |
1433 | |||
1434 | if (LIKELY(timeout < 0)) | ||
1435 | { | ||
1436 | /* Block until explicitly woken */ | ||
1437 | block_thread_on_l(current, STATE_BLOCKED); | ||
1438 | } | ||
1439 | else | ||
1440 | { | 1141 | { |
1441 | /* Set the state to blocked with the specified timeout */ | 1142 | current->blocker = bl; |
1442 | current->tmo_tick = current_tick + timeout; | 1143 | blt = lock_blocker_thread(bl); |
1443 | block_thread_on_l(current, STATE_BLOCKED_W_TMO); | ||
1444 | } | 1144 | } |
1145 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
1445 | 1146 | ||
1446 | if (bl == NULL) | 1147 | wait_queue_register(current); |
1447 | { | 1148 | prepare_block(current, STATE_BLOCKED, timeout); |
1448 | UNLOCK_THREAD(current); | ||
1449 | return; | ||
1450 | } | ||
1451 | 1149 | ||
1452 | #ifdef HAVE_PRIORITY_SCHEDULING | 1150 | #ifdef HAVE_PRIORITY_SCHEDULING |
1453 | int newblpr = current->priority; | 1151 | if (bl != NULL) |
1454 | UNLOCK_THREAD(current); | ||
1455 | |||
1456 | if (newblpr >= bl->priority) | ||
1457 | { | 1152 | { |
1458 | unlock_blocker_thread(bl); | 1153 | int newblpr = current->priority; |
1459 | return; /* Queue priority won't change */ | 1154 | UNLOCK_THREAD(current); |
1460 | } | ||
1461 | 1155 | ||
1462 | inherit_priority(bl, bl, blt, newblpr); | 1156 | if (newblpr < bl->priority) |
1157 | inherit_priority(bl, bl, blt, newblpr); | ||
1158 | else | ||
1159 | unlock_blocker_thread(bl); /* Queue priority won't change */ | ||
1160 | } | ||
1161 | else | ||
1463 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 1162 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
1464 | } | ||
1465 | |||
1466 | /*--------------------------------------------------------------------------- | ||
1467 | * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. | ||
1468 | *--------------------------------------------------------------------------- | ||
1469 | */ | ||
1470 | static void new_thread_id(unsigned int slot_num, | ||
1471 | struct thread_entry *thread) | ||
1472 | { | ||
1473 | unsigned int version = | ||
1474 | (thread->id + (1u << THREAD_ID_VERSION_SHIFT)) | ||
1475 | & THREAD_ID_VERSION_MASK; | ||
1476 | |||
1477 | /* If wrapped to 0, make it 1 */ | ||
1478 | if (version == 0) | ||
1479 | version = 1u << THREAD_ID_VERSION_SHIFT; | ||
1480 | |||
1481 | thread->id = version | (slot_num & THREAD_ID_SLOT_MASK); | ||
1482 | } | ||
1483 | |||
1484 | /*--------------------------------------------------------------------------- | ||
1485 | * Find an empty thread slot or MAXTHREADS if none found. The slot returned | ||
1486 | * will be locked on multicore. | ||
1487 | *--------------------------------------------------------------------------- | ||
1488 | */ | ||
1489 | static struct thread_entry * find_empty_thread_slot(void) | ||
1490 | { | ||
1491 | /* Any slot could be on an interrupt-accessible list */ | ||
1492 | IF_COP( int oldlevel = disable_irq_save(); ) | ||
1493 | struct thread_entry *thread = NULL; | ||
1494 | int n; | ||
1495 | |||
1496 | for (n = 0; n < MAXTHREADS; n++) | ||
1497 | { | 1163 | { |
1498 | /* Obtain current slot state - lock it on multicore */ | 1164 | UNLOCK_THREAD(current); |
1499 | struct thread_entry *t = &threads[n]; | ||
1500 | LOCK_THREAD(t); | ||
1501 | |||
1502 | if (t->state == STATE_KILLED) | ||
1503 | { | ||
1504 | /* Slot is empty - leave it locked and caller will unlock */ | ||
1505 | thread = t; | ||
1506 | break; | ||
1507 | } | ||
1508 | |||
1509 | /* Finished examining slot - no longer busy - unlock on multicore */ | ||
1510 | UNLOCK_THREAD(t); | ||
1511 | } | 1165 | } |
1512 | |||
1513 | IF_COP( restore_irq(oldlevel); ) /* Re-enable interrupts - this slot is | ||
1514 | not accessible to them yet */ | ||
1515 | return thread; | ||
1516 | } | ||
1517 | |||
1518 | /*--------------------------------------------------------------------------- | ||
1519 | * Return the thread_entry pointer for a thread_id. Return the current | ||
1520 | * thread if the ID is (unsigned int)-1 (alias for current). | ||
1521 | *--------------------------------------------------------------------------- | ||
1522 | */ | ||
1523 | struct thread_entry * thread_id_entry(unsigned int thread_id) | ||
1524 | { | ||
1525 | return &threads[thread_id & THREAD_ID_SLOT_MASK]; | ||
1526 | } | ||
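
The removed new_thread_id() and thread_id_entry() above show how a thread ID packs a recycle "version" and a slot index into one word: the version is bumped every time a slot is reused, and the slot index selects the entry in threads[]. A minimal sketch of that scheme, assuming an 8-bit slot field consistent with the 0x00000100..0xffffff00 version range quoted in the comment (the real THREAD_ID_* constants live in thread-internal.h):

    /* Illustrative only - mirrors the removed new_thread_id()/thread_id_entry().
     * Assumed layout: slot index in the low 8 bits, version in the upper bits. */
    #define ID_SLOT_MASK      0x000000ffu
    #define ID_VERSION_SHIFT  8
    #define ID_VERSION_MASK   0xffffff00u

    static unsigned int recycle_id(unsigned int old_id, unsigned int slot)
    {
        unsigned int version =
            (old_id + (1u << ID_VERSION_SHIFT)) & ID_VERSION_MASK;
        if (version == 0)                   /* wrapped to 0 - make it nonzero */
            version = 1u << ID_VERSION_SHIFT;
        return version | (slot & ID_SLOT_MASK);
    }

    static unsigned int id_to_slot(unsigned int id)
    {
        return id & ID_SLOT_MASK;           /* index into the threads[] array */
    }

A stale ID from an already-killed thread still maps to the same slot but carries an older version, which is why thread_wait() below re-checks thread->id == thread_id after locking the slot.
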
1527 | |||
1528 | /*--------------------------------------------------------------------------- | ||
1529 | * Return the thread id of the calling thread | ||
1530 | * -------------------------------------------------------------------------- | ||
1531 | */ | ||
1532 | unsigned int thread_self(void) | ||
1533 | { | ||
1534 | return cores[CURRENT_CORE].running->id; | ||
1535 | } | ||
1536 | |||
1537 | /*--------------------------------------------------------------------------- | ||
1538 | * Return the thread entry of the calling thread. | ||
1539 | * | ||
1540 | * INTERNAL: Intended for use by kernel and not for programs. | ||
1541 | *--------------------------------------------------------------------------- | ||
1542 | */ | ||
1543 | struct thread_entry* thread_self_entry(void) | ||
1544 | { | ||
1545 | return cores[CURRENT_CORE].running; | ||
1546 | } | 1166 | } |
1547 | 1167 | ||
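
The reworked block_thread_() above registers the caller on a wait queue, arms the optional timeout, and, when a struct blocker is attached, donates the waiter's priority to the owner only if it beats (is numerically lower than) the queue's current best. A rough sketch of how a kernel object might park the current thread on its queue, in the same pattern thread_wait() uses further down; the my_obj type and field names are invented, and the trailing switch_thread() follows the usual blocking sequence rather than anything shown in this hunk:

    /* Hypothetical waitable object - a sketch, not a real kernel primitive. */
    struct my_obj
    {
        struct __wait_queue queue;    /* waiter list */
        struct corelock cl;           /* multiprocessor sync */
    };

    static void my_obj_wait(struct my_obj *obj)
    {
        struct thread_entry *current = __running_self_entry();

        corelock_lock(&obj->cl);
        disable_irq();
        /* Last argument is the blocker used for priority inheritance;
         * NULL here because this object does not track an owner. */
        block_thread(current, TIMEOUT_BLOCK, &obj->queue, NULL);
        corelock_unlock(&obj->cl);
        switch_thread();              /* actually stop running */
    }
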
1548 | /*--------------------------------------------------------------------------- | 1168 | /*--------------------------------------------------------------------------- |
@@ -1552,9 +1172,8 @@ struct thread_entry* thread_self_entry(void) | |||
1552 | */ | 1172 | */ |
1553 | void core_idle(void) | 1173 | void core_idle(void) |
1554 | { | 1174 | { |
1555 | IF_COP( const unsigned int core = CURRENT_CORE; ) | ||
1556 | disable_irq(); | 1175 | disable_irq(); |
1557 | core_sleep(IF_COP(core)); | 1176 | core_sleep(IF_COP(CURRENT_CORE)); |
1558 | } | 1177 | } |
1559 | 1178 | ||
1560 | /*--------------------------------------------------------------------------- | 1179 | /*--------------------------------------------------------------------------- |
@@ -1570,141 +1189,64 @@ unsigned int create_thread(void (*function)(void), | |||
1570 | IF_PRIO(, int priority) | 1189 | IF_PRIO(, int priority) |
1571 | IF_COP(, unsigned int core)) | 1190 | IF_COP(, unsigned int core)) |
1572 | { | 1191 | { |
1573 | unsigned int i; | 1192 | struct thread_entry *thread = thread_alloc(); |
1574 | unsigned int stack_words; | ||
1575 | uintptr_t stackptr, stackend; | ||
1576 | struct thread_entry *thread; | ||
1577 | unsigned state; | ||
1578 | int oldlevel; | ||
1579 | |||
1580 | thread = find_empty_thread_slot(); | ||
1581 | if (thread == NULL) | 1193 | if (thread == NULL) |
1582 | { | ||
1583 | return 0; | 1194 | return 0; |
1584 | } | ||
1585 | |||
1586 | oldlevel = disable_irq_save(); | ||
1587 | |||
1588 | /* Munge the stack to make it easy to spot stack overflows */ | ||
1589 | stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t)); | ||
1590 | stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t)); | ||
1591 | stack_size = stackend - stackptr; | ||
1592 | stack_words = stack_size / sizeof (uintptr_t); | ||
1593 | 1195 | ||
1594 | for (i = 0; i < stack_words; i++) | 1196 | new_thread_base_init(thread, &stack, &stack_size, name |
1595 | { | 1197 | IF_PRIO(, priority) IF_COP(, core)); |
1596 | ((uintptr_t *)stackptr)[i] = DEADBEEF; | ||
1597 | } | ||
1598 | 1198 | ||
1599 | /* Store interesting information */ | 1199 | unsigned int stack_words = stack_size / sizeof (uintptr_t); |
1600 | thread->name = name; | 1200 | if (stack_words == 0) |
1601 | thread->stack = (uintptr_t *)stackptr; | 1201 | return 0; |
1602 | thread->stack_size = stack_size; | ||
1603 | thread->queue = NULL; | ||
1604 | #ifdef HAVE_WAKEUP_EXT_CB | ||
1605 | thread->wakeup_ext_cb = NULL; | ||
1606 | #endif | ||
1607 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
1608 | thread->cpu_boost = 0; | ||
1609 | #endif | ||
1610 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1611 | memset(&thread->pdist, 0, sizeof(thread->pdist)); | ||
1612 | thread->blocker = NULL; | ||
1613 | thread->base_priority = priority; | ||
1614 | thread->priority = priority; | ||
1615 | thread->skip_count = priority; | ||
1616 | prio_add_entry(&thread->pdist, priority); | ||
1617 | #endif | ||
1618 | 1202 | ||
1619 | #ifdef HAVE_IO_PRIORITY | 1203 | /* Munge the stack to make it easy to spot stack overflows */ |
1620 | /* Default to high (foreground) priority */ | 1204 | for (unsigned int i = 0; i < stack_words; i++) |
1621 | thread->io_priority = IO_PRIORITY_IMMEDIATE; | 1205 | ((uintptr_t *)stack)[i] = DEADBEEF; |
1622 | #endif | ||
1623 | 1206 | ||
1624 | #if NUM_CORES > 1 | 1207 | #if NUM_CORES > 1 |
1625 | thread->core = core; | ||
1626 | |||
1627 | /* Writeback stack munging or anything else before starting */ | 1208 | /* Writeback stack munging or anything else before starting */ |
1628 | if (core != CURRENT_CORE) | 1209 | if (core != CURRENT_CORE) |
1629 | { | ||
1630 | commit_dcache(); | 1210 | commit_dcache(); |
1631 | } | ||
1632 | #endif | 1211 | #endif |
1633 | 1212 | ||
1634 | /* Thread is not on any timeout list but be a bit paranoid */ | 1213 | thread->context.sp = (typeof (thread->context.sp))(stack + stack_size); |
1635 | thread->tmo.prev = NULL; | ||
1636 | |||
1637 | state = (flags & CREATE_THREAD_FROZEN) ? | ||
1638 | STATE_FROZEN : STATE_RUNNING; | ||
1639 | |||
1640 | thread->context.sp = (typeof (thread->context.sp))stackend; | ||
1641 | |||
1642 | /* Load the thread's context structure with needed startup information */ | ||
1643 | THREAD_STARTUP_INIT(core, thread, function); | 1214 | THREAD_STARTUP_INIT(core, thread, function); |
1644 | 1215 | ||
1645 | thread->state = state; | 1216 | int oldlevel = disable_irq_save(); |
1646 | i = thread->id; /* Snapshot while locked */ | 1217 | LOCK_THREAD(thread); |
1218 | |||
1219 | thread->state = STATE_FROZEN; | ||
1647 | 1220 | ||
1648 | if (state == STATE_RUNNING) | 1221 | if (!(flags & CREATE_THREAD_FROZEN)) |
1649 | core_schedule_wakeup(thread); | 1222 | core_schedule_wakeup(thread); |
1650 | 1223 | ||
1224 | unsigned int id = thread->id; /* Snapshot while locked */ | ||
1225 | |||
1651 | UNLOCK_THREAD(thread); | 1226 | UNLOCK_THREAD(thread); |
1652 | restore_irq(oldlevel); | 1227 | restore_irq(oldlevel); |
1653 | 1228 | ||
1654 | return i; | 1229 | return id; |
1655 | } | 1230 | } |
1656 | 1231 | ||
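
create_thread() pre-fills the aligned stack with the DEADBEEF pattern so that overflows and peak usage can be spotted later by checking how much of the pattern survives. A sketch of the watermark scan this enables (illustrative; Rockbox's own stack-usage reporting does the equivalent):

    /* Count how much of a DEADBEEF-filled, downward-growing stack was used.
     * 'stack' points at the lowest address, 'stack_size' is in bytes and
     * is assumed to be at least one word. */
    static int stack_usage_percent(const uintptr_t *stack, size_t stack_size)
    {
        size_t words  = stack_size / sizeof (uintptr_t);
        size_t unused = 0;

        /* Untouched words still hold the fill pattern, from the bottom up. */
        while (unused < words && stack[unused] == DEADBEEF)
            unused++;

        return (int)(((words - unused) * 100) / words);
    }
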
1657 | #ifdef HAVE_SCHEDULER_BOOSTCTRL | ||
1658 | /*--------------------------------------------------------------------------- | ||
1659 | * Change the boost state of a thread boosting or unboosting the CPU | ||
1660 | * as required. | ||
1661 | *--------------------------------------------------------------------------- | ||
1662 | */ | ||
1663 | static inline void boost_thread(struct thread_entry *thread, bool boost) | ||
1664 | { | ||
1665 | if ((thread->cpu_boost != 0) != boost) | ||
1666 | { | ||
1667 | thread->cpu_boost = boost; | ||
1668 | cpu_boost(boost); | ||
1669 | } | ||
1670 | } | ||
1671 | |||
1672 | void trigger_cpu_boost(void) | ||
1673 | { | ||
1674 | struct thread_entry *current = cores[CURRENT_CORE].running; | ||
1675 | boost_thread(current, true); | ||
1676 | } | ||
1677 | |||
1678 | void cancel_cpu_boost(void) | ||
1679 | { | ||
1680 | struct thread_entry *current = cores[CURRENT_CORE].running; | ||
1681 | boost_thread(current, false); | ||
1682 | } | ||
1683 | #endif /* HAVE_SCHEDULER_BOOSTCTRL */ | ||
1684 | |||
1685 | /*--------------------------------------------------------------------------- | 1232 | /*--------------------------------------------------------------------------- |
1686 | * Block the current thread until another thread terminates. A thread may | 1233 | * Block the current thread until another thread terminates. A thread may |
1687 | * wait on itself to terminate which prevents it from running again and it | 1234 | * wait on itself to terminate but that will deadlock. |
1688 | * will need to be killed externally. | 1235 | * |
1689 | * Parameter is the ID as returned from create_thread(). | 1236 | * Parameter is the ID as returned from create_thread(). |
1690 | *--------------------------------------------------------------------------- | 1237 | *--------------------------------------------------------------------------- |
1691 | */ | 1238 | */ |
1692 | void thread_wait(unsigned int thread_id) | 1239 | void thread_wait(unsigned int thread_id) |
1693 | { | 1240 | { |
1694 | struct thread_entry *current = cores[CURRENT_CORE].running; | 1241 | struct thread_entry *current = __running_self_entry(); |
1695 | struct thread_entry *thread = thread_id_entry(thread_id); | 1242 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1696 | 1243 | ||
1697 | /* Lock thread-as-waitable-object lock */ | ||
1698 | corelock_lock(&thread->waiter_cl); | 1244 | corelock_lock(&thread->waiter_cl); |
1699 | 1245 | ||
1700 | /* Be sure it hasn't been killed yet */ | ||
1701 | if (thread->id == thread_id && thread->state != STATE_KILLED) | 1246 | if (thread->id == thread_id && thread->state != STATE_KILLED) |
1702 | { | 1247 | { |
1703 | IF_COP( current->obj_cl = &thread->waiter_cl; ) | ||
1704 | current->bqp = &thread->queue; | ||
1705 | |||
1706 | disable_irq(); | 1248 | disable_irq(); |
1707 | block_thread(current, TIMEOUT_BLOCK); | 1249 | block_thread(current, TIMEOUT_BLOCK, &thread->queue, NULL); |
1708 | 1250 | ||
1709 | corelock_unlock(&thread->waiter_cl); | 1251 | corelock_unlock(&thread->waiter_cl); |
1710 | 1252 | ||
@@ -1716,36 +1258,35 @@ void thread_wait(unsigned int thread_id) | |||
1716 | } | 1258 | } |
1717 | 1259 | ||
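
Together, create_thread() and thread_wait() give a simple fork/join pattern. A hedged usage sketch - the worker, its stack and the priority are invented, the argument order follows the usual Rockbox prototype (function, stack, stack size, flags, name, then the optional priority and core), and the IF_PRIO()/IF_COP() arguments only exist when priority scheduling and multicore are compiled in:

    static long worker_stack[DEFAULT_STACK_SIZE / sizeof (long)];

    static void worker(void)
    {
        /* ... do the actual work ... */
        thread_exit();              /* terminate and wake any waiters */
    }

    static void run_and_join(void)
    {
        unsigned int id = create_thread(worker,
                                        worker_stack, sizeof (worker_stack),
                                        0 /* flags; CREATE_THREAD_FROZEN would
                                             need a later thread_thaw(id) */,
                                        "worker"
                                        IF_PRIO(, PRIORITY_BACKGROUND)
                                        IF_COP(, CPU));
        if (id != 0)                /* 0 means no free slot */
            thread_wait(id);        /* block until the worker terminates */
    }
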
1718 | /*--------------------------------------------------------------------------- | 1260 | /*--------------------------------------------------------------------------- |
1719 | * Exit the current thread. The Right Way to Do Things (TM). | 1261 | * Exit the current thread |
1720 | *--------------------------------------------------------------------------- | 1262 | *--------------------------------------------------------------------------- |
1721 | */ | 1263 | */ |
1722 | /* This is done to foil optimizations that may require the current stack, | 1264 | static USED_ATTR NORETURN_ATTR |
1723 | * such as optimizing subexpressions that put variables on the stack that | 1265 | void thread_exit_final(struct thread_entry *current) |
1724 | * get used after switching stacks. */ | ||
1725 | #if NUM_CORES > 1 | ||
1726 | /* Called by ASM stub */ | ||
1727 | static void thread_final_exit_do(struct thread_entry *current) | ||
1728 | #else | ||
1729 | /* No special procedure is required before calling */ | ||
1730 | static inline void thread_final_exit(struct thread_entry *current) | ||
1731 | #endif | ||
1732 | { | 1266 | { |
1733 | /* At this point, this thread isn't using resources allocated for | 1267 | /* Slot is no longer this thread */ |
1734 | * execution except the slot itself. */ | 1268 | new_thread_id(current); |
1269 | current->name = NULL; | ||
1735 | 1270 | ||
1736 | /* Signal this thread */ | 1271 | /* No longer using resources from creator */ |
1737 | thread_queue_wake(¤t->queue); | 1272 | wait_queue_wake(¤t->queue); |
1273 | |||
1274 | UNLOCK_THREAD(current); | ||
1738 | corelock_unlock(¤t->waiter_cl); | 1275 | corelock_unlock(¤t->waiter_cl); |
1276 | |||
1277 | thread_free(current); | ||
1278 | |||
1739 | switch_thread(); | 1279 | switch_thread(); |
1280 | |||
1740 | /* This should never and must never be reached - if it is, the | 1281 | /* This should never and must never be reached - if it is, the |
1741 | * state is corrupted */ | 1282 | * state is corrupted */ |
1742 | THREAD_PANICF("thread_exit->K:*R", current); | 1283 | THREAD_PANICF("thread_exit->K:*R", current); |
1743 | while (1); | ||
1744 | } | 1284 | } |
1745 | 1285 | ||
1746 | void thread_exit(void) | 1286 | void thread_exit(void) |
1747 | { | 1287 | { |
1748 | register struct thread_entry * current = cores[CURRENT_CORE].running; | 1288 | struct core_entry *corep = __core_id_entry(CURRENT_CORE); |
1289 | register struct thread_entry *current = corep->running; | ||
1749 | 1290 | ||
1750 | /* Cancel CPU boost if any */ | 1291 | /* Cancel CPU boost if any */ |
1751 | cancel_cpu_boost(); | 1292 | cancel_cpu_boost(); |
@@ -1764,24 +1305,21 @@ void thread_exit(void) | |||
1764 | thread_panicf("abandon ship!", current); | 1305 | thread_panicf("abandon ship!", current); |
1765 | #endif /* HAVE_PRIORITY_SCHEDULING */ | 1306 | #endif /* HAVE_PRIORITY_SCHEDULING */ |
1766 | 1307 | ||
1767 | if (current->tmo.prev != NULL) | 1308 | /* Remove from scheduler lists */ |
1768 | { | 1309 | tmo_queue_remove(&corep->tmo, current); |
1769 | /* Cancel pending timeout list removal */ | 1310 | prepare_block(current, STATE_KILLED, -1); |
1770 | remove_from_list_tmo(current); | 1311 | corep->running = NULL; /* No switch_thread context save */ |
1771 | } | ||
1772 | |||
1773 | /* Switch tasks and never return */ | ||
1774 | block_thread_on_l(current, STATE_KILLED); | ||
1775 | |||
1776 | /* Slot must be unusable until thread is really gone */ | ||
1777 | UNLOCK_THREAD_AT_TASK_SWITCH(current); | ||
1778 | 1312 | ||
1779 | /* Update ID for this slot */ | 1313 | #ifdef RB_PROFILE |
1780 | new_thread_id(current->id, current); | 1314 | profile_thread_stopped(THREAD_ID_SLOT(current->id)); |
1781 | current->name = NULL; | 1315 | #endif |
1782 | 1316 | ||
1783 | /* Do final cleanup and remove the thread */ | 1317 | /* Do final release of resources and remove the thread */ |
1784 | thread_final_exit(current); | 1318 | #if NUM_CORES > 1 |
1319 | thread_exit_finalize(current->core, current); | ||
1320 | #else | ||
1321 | thread_exit_final(current); | ||
1322 | #endif | ||
1785 | } | 1323 | } |
1786 | 1324 | ||
1787 | #ifdef HAVE_PRIORITY_SCHEDULING | 1325 | #ifdef HAVE_PRIORITY_SCHEDULING |
@@ -1796,10 +1334,8 @@ int thread_set_priority(unsigned int thread_id, int priority) | |||
1796 | return -1; /* Invalid priority argument */ | 1334 | return -1; /* Invalid priority argument */ |
1797 | 1335 | ||
1798 | int old_base_priority = -1; | 1336 | int old_base_priority = -1; |
1799 | struct thread_entry *thread = thread_id_entry(thread_id); | 1337 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1800 | 1338 | ||
1801 | /* Thread could be on any list and therefore on an interrupt accessible | ||
1802 | one - disable interrupts */ | ||
1803 | const int oldlevel = disable_irq_save(); | 1339 | const int oldlevel = disable_irq_save(); |
1804 | LOCK_THREAD(thread); | 1340 | LOCK_THREAD(thread); |
1805 | 1341 | ||
@@ -1825,7 +1361,7 @@ int thread_set_priority(unsigned int thread_id, int priority) | |||
1825 | { | 1361 | { |
1826 | /* This thread is running - just change location on the run queue. | 1362 | /* This thread is running - just change location on the run queue. |
1827 | Also sets thread->priority. */ | 1363 | Also sets thread->priority. */ |
1828 | set_running_thread_priority(thread, new_priority); | 1364 | set_rtr_thread_priority(thread, new_priority); |
1829 | goto done; | 1365 | goto done; |
1830 | } | 1366 | } |
1831 | 1367 | ||
@@ -1838,7 +1374,7 @@ int thread_set_priority(unsigned int thread_id, int priority) | |||
1838 | } | 1374 | } |
1839 | 1375 | ||
1840 | struct thread_entry *blt = lock_blocker_thread(bl); | 1376 | struct thread_entry *blt = lock_blocker_thread(bl); |
1841 | struct thread_entry **bqp = thread->bqp; | 1377 | struct __wait_queue *wqp = wait_queue_ptr(thread); |
1842 | 1378 | ||
1843 | thread->priority = new_priority; | 1379 | thread->priority = new_priority; |
1844 | 1380 | ||
@@ -1850,7 +1386,7 @@ int thread_set_priority(unsigned int thread_id, int priority) | |||
1850 | if (new_priority < oldblpr) | 1386 | if (new_priority < oldblpr) |
1851 | newblpr = new_priority; | 1387 | newblpr = new_priority; |
1852 | else if (old_priority <= oldblpr) | 1388 | else if (old_priority <= oldblpr) |
1853 | newblpr = find_highest_priority_in_list_l(*bqp); | 1389 | newblpr = wait_queue_find_priority(wqp); |
1854 | 1390 | ||
1855 | if (newblpr == oldblpr) | 1391 | if (newblpr == oldblpr) |
1856 | { | 1392 | { |
@@ -1872,7 +1408,7 @@ done: | |||
1872 | */ | 1408 | */ |
1873 | int thread_get_priority(unsigned int thread_id) | 1409 | int thread_get_priority(unsigned int thread_id) |
1874 | { | 1410 | { |
1875 | struct thread_entry *thread = thread_id_entry(thread_id); | 1411 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1876 | int base_priority = thread->base_priority; | 1412 | int base_priority = thread->base_priority; |
1877 | 1413 | ||
1878 | /* Simply check without locking slot. It may or may not be valid by the | 1414 | /* Simply check without locking slot. It may or may not be valid by the |
@@ -1888,13 +1424,13 @@ int thread_get_priority(unsigned int thread_id) | |||
1888 | #ifdef HAVE_IO_PRIORITY | 1424 | #ifdef HAVE_IO_PRIORITY |
1889 | int thread_get_io_priority(unsigned int thread_id) | 1425 | int thread_get_io_priority(unsigned int thread_id) |
1890 | { | 1426 | { |
1891 | struct thread_entry *thread = thread_id_entry(thread_id); | 1427 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1892 | return thread->io_priority; | 1428 | return thread->io_priority; |
1893 | } | 1429 | } |
1894 | 1430 | ||
1895 | void thread_set_io_priority(unsigned int thread_id,int io_priority) | 1431 | void thread_set_io_priority(unsigned int thread_id,int io_priority) |
1896 | { | 1432 | { |
1897 | struct thread_entry *thread = thread_id_entry(thread_id); | 1433 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1898 | thread->io_priority = io_priority; | 1434 | thread->io_priority = io_priority; |
1899 | } | 1435 | } |
1900 | #endif | 1436 | #endif |
@@ -1907,7 +1443,7 @@ void thread_set_io_priority(unsigned int thread_id,int io_priority) | |||
1907 | */ | 1443 | */ |
1908 | void thread_thaw(unsigned int thread_id) | 1444 | void thread_thaw(unsigned int thread_id) |
1909 | { | 1445 | { |
1910 | struct thread_entry *thread = thread_id_entry(thread_id); | 1446 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1911 | int oldlevel = disable_irq_save(); | 1447 | int oldlevel = disable_irq_save(); |
1912 | 1448 | ||
1913 | LOCK_THREAD(thread); | 1449 | LOCK_THREAD(thread); |
@@ -1926,68 +1462,72 @@ void thread_thaw(unsigned int thread_id) | |||
1926 | * Switch the processor that the currently executing thread runs on. | 1462 | * Switch the processor that the currently executing thread runs on. |
1927 | *--------------------------------------------------------------------------- | 1463 | *--------------------------------------------------------------------------- |
1928 | */ | 1464 | */ |
1465 | static USED_ATTR NORETURN_ATTR | ||
1466 | void switch_core_final(unsigned int old_core, struct thread_entry *current) | ||
1467 | { | ||
1468 | /* Old core won't be using slot resources at this point */ | ||
1469 | core_schedule_wakeup(current); | ||
1470 | UNLOCK_THREAD(current); | ||
1471 | #ifdef RB_PROFILE | ||
1472 | profile_thread_stopped(THREAD_ID_SLOT(current->id)); | ||
1473 | #endif | ||
1474 | switch_thread(); | ||
1475 | /* not reached */ | ||
1476 | THREAD_PANICF("switch_core_final->same core!", current); | ||
1477 | (void)old_core; | ||
1478 | } | ||
1479 | |||
1929 | unsigned int switch_core(unsigned int new_core) | 1480 | unsigned int switch_core(unsigned int new_core) |
1930 | { | 1481 | { |
1931 | const unsigned int core = CURRENT_CORE; | 1482 | const unsigned int old_core = CURRENT_CORE; |
1932 | struct thread_entry *current = cores[core].running; | 1483 | if (old_core == new_core) |
1484 | return old_core; /* No change */ | ||
1933 | 1485 | ||
1934 | if (core == new_core) | 1486 | struct core_entry *corep = __core_id_entry(old_core); |
1935 | { | 1487 | struct thread_entry *current = corep->running; |
1936 | /* No change - just return same core */ | ||
1937 | return core; | ||
1938 | } | ||
1939 | 1488 | ||
1940 | disable_irq(); | 1489 | disable_irq(); |
1941 | LOCK_THREAD(current); | 1490 | LOCK_THREAD(current); |
1942 | 1491 | ||
1943 | /* Get us off the running list for the current core */ | 1492 | /* Remove us from old core lists */ |
1944 | RTR_LOCK(core); | 1493 | tmo_queue_remove(&corep->tmo, current); |
1945 | remove_from_list_l(&cores[core].running, current); | 1494 | core_rtr_remove(corep, current); |
1946 | rtr_subtract_entry(core, current->priority); | 1495 | corep->running = NULL; /* No switch_thread context save */ |
1947 | RTR_UNLOCK(core); | ||
1948 | |||
1949 | /* Stash return value (old core) in a safe place */ | ||
1950 | current->retval = core; | ||
1951 | |||
1952 | /* If a timeout hadn't yet been cleaned-up it must be removed now or | ||
1953 | * the other core will likely attempt a removal from the wrong list! */ | ||
1954 | if (current->tmo.prev != NULL) | ||
1955 | { | ||
1956 | remove_from_list_tmo(current); | ||
1957 | } | ||
1958 | 1496 | ||
1959 | /* Change the core number for this thread slot */ | 1497 | /* Do the actual migration */ |
1960 | current->core = new_core; | 1498 | current->core = new_core; |
1499 | switch_thread_core(old_core, current); | ||
1961 | 1500 | ||
1962 | /* Do not use core_schedule_wakeup here since this will result in | 1501 | /* Executing on new core */ |
1963 | * the thread starting to run on the other core before being finished on | 1502 | return old_core; |
1964 | * this one. Delay the list unlock to keep the other core stuck | 1503 | } |
1965 | * until this thread is ready. */ | 1504 | #endif /* NUM_CORES > 1 */ |
1966 | RTR_LOCK(new_core); | ||
1967 | |||
1968 | rtr_add_entry(new_core, current->priority); | ||
1969 | add_to_list_l(&cores[new_core].running, current); | ||
1970 | |||
1971 | /* Make a callback into device-specific code, unlock the wakeup list so | ||
1972 | * that execution may resume on the new core, unlock our slot and finally | ||
1973 | * restore the interrupt level */ | ||
1974 | cores[core].blk_ops.flags = TBOP_SWITCH_CORE; | ||
1975 | cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl; | ||
1976 | cores[core].block_task = current; | ||
1977 | |||
1978 | UNLOCK_THREAD(current); | ||
1979 | 1505 | ||
1980 | /* Alert other core to activity */ | 1506 | #ifdef HAVE_SCHEDULER_BOOSTCTRL |
1981 | core_wake(new_core); | 1507 | /*--------------------------------------------------------------------------- |
1508 | * Change the boost state of a thread boosting or unboosting the CPU | ||
1509 | * as required. | ||
1510 | *--------------------------------------------------------------------------- | ||
1511 | */ | ||
1512 | static inline void boost_thread(struct thread_entry *thread, bool boost) | ||
1513 | { | ||
1514 | if ((thread->cpu_boost != 0) != boost) | ||
1515 | { | ||
1516 | thread->cpu_boost = boost; | ||
1517 | cpu_boost(boost); | ||
1518 | } | ||
1519 | } | ||
1982 | 1520 | ||
1983 | /* Do the stack switching, cache maintenance and switch_thread call - | 1521 | void trigger_cpu_boost(void) |
1984 | requires native code */ | 1522 | { |
1985 | switch_thread_core(core, current); | 1523 | boost_thread(__running_self_entry(), true); |
1524 | } | ||
1986 | 1525 | ||
1987 | /* Finally return the old core to caller */ | 1526 | void cancel_cpu_boost(void) |
1988 | return current->retval; | 1527 | { |
1528 | boost_thread(__running_self_entry(), false); | ||
1989 | } | 1529 | } |
1990 | #endif /* NUM_CORES > 1 */ | 1530 | #endif /* HAVE_SCHEDULER_BOOSTCTRL */ |
1991 | 1531 | ||
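
boost_thread() only forwards a transition to cpu_boost() when the thread's boost state actually changes, so a thread cannot unbalance the global boost counter by calling trigger_cpu_boost() repeatedly. A typical (hypothetical) caller simply brackets a CPU-bound burst:

    static void decode_burst(void)
    {
        trigger_cpu_boost();   /* raise the clock while this thread works */
        /* ... CPU-intensive section, e.g. refilling an audio buffer ... */
        cancel_cpu_boost();    /* drop this thread's boost again */
    }
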
1992 | /*--------------------------------------------------------------------------- | 1532 | /*--------------------------------------------------------------------------- |
1993 | * Initialize threading API. This assumes interrupts are not yet enabled. On | 1533 | * Initialize threading API. This assumes interrupts are not yet enabled. On |
@@ -1998,127 +1538,56 @@ unsigned int switch_core(unsigned int new_core) | |||
1998 | void INIT_ATTR init_threads(void) | 1538 | void INIT_ATTR init_threads(void) |
1999 | { | 1539 | { |
2000 | const unsigned int core = CURRENT_CORE; | 1540 | const unsigned int core = CURRENT_CORE; |
2001 | struct thread_entry *thread; | ||
2002 | 1541 | ||
2003 | if (core == CPU) | 1542 | if (core == CPU) |
2004 | { | 1543 | { |
2005 | /* Initialize core locks and IDs in all slots */ | 1544 | thread_alloc_init(); /* before using cores! */ |
2006 | int n; | 1545 | |
2007 | for (n = 0; n < MAXTHREADS; n++) | 1546 | /* Create main thread */ |
1547 | struct thread_entry *thread = thread_alloc(); | ||
1548 | if (thread == NULL) | ||
2008 | { | 1549 | { |
2009 | thread = &threads[n]; | 1550 | /* WTF? There really must be a slot available at this stage. |
2010 | corelock_init(&thread->waiter_cl); | 1551 | * This can fail if, for example, .bss isn't zero'ed out by the |
2011 | corelock_init(&thread->slot_cl); | 1552 | * loader or threads is in the wrong section. */ |
2012 | thread->id = THREAD_ID_INIT(n); | 1553 | THREAD_PANICF("init_threads->no slot", NULL); |
2013 | } | 1554 | } |
2014 | } | ||
2015 | |||
2016 | /* CPU will initialize first and then sleep */ | ||
2017 | thread = find_empty_thread_slot(); | ||
2018 | 1555 | ||
2019 | if (thread == NULL) | 1556 | size_t stack_size; |
2020 | { | 1557 | void *stack = __get_main_stack(&stack_size); |
2021 | /* WTF? There really must be a slot available at this stage. | 1558 | new_thread_base_init(thread, &stack, &stack_size, __main_thread_name |
2022 | * This can fail if, for example, .bss isn't zero'ed out by the loader | 1559 | IF_PRIO(, PRIORITY_MAIN_THREAD) IF_COP(, core)); |
2023 | * or threads is in the wrong section. */ | ||
2024 | THREAD_PANICF("init_threads->no slot", NULL); | ||
2025 | } | ||
2026 | 1560 | ||
2027 | /* Initialize initially non-zero members of core */ | 1561 | struct core_entry *corep = __core_id_entry(core); |
2028 | cores[core].next_tmo_check = current_tick; /* Something not in the past */ | 1562 | core_rtr_add(corep, thread); |
1563 | corep->running = thread; | ||
2029 | 1564 | ||
2030 | /* Initialize initially non-zero members of slot */ | 1565 | #ifdef INIT_MAIN_THREAD |
2031 | UNLOCK_THREAD(thread); /* No sync worries yet */ | 1566 | init_main_thread(&thread->context); |
2032 | thread->name = main_thread_name; | ||
2033 | thread->state = STATE_RUNNING; | ||
2034 | IF_COP( thread->core = core; ) | ||
2035 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
2036 | corelock_init(&cores[core].rtr_cl); | ||
2037 | thread->base_priority = PRIORITY_USER_INTERFACE; | ||
2038 | prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE); | ||
2039 | thread->priority = PRIORITY_USER_INTERFACE; | ||
2040 | rtr_add_entry(core, PRIORITY_USER_INTERFACE); | ||
2041 | #endif | 1567 | #endif |
1568 | } | ||
2042 | 1569 | ||
2043 | add_to_list_l(&cores[core].running, thread); | 1570 | #if NUM_CORES > 1 |
2044 | 1571 | /* Boot CPU: | |
2045 | if (core == CPU) | 1572 | * Wait for other processors to finish their inits since create_thread |
2046 | { | 1573 | * isn't safe to call until the kernel inits are done. The first |
2047 | thread->stack = stackbegin; | 1574 | * threads created in the system must of course be created by CPU. |
2048 | thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin; | 1575 | * Another possible approach is to initialize all cores and slots |
2049 | #if NUM_CORES > 1 /* This code path will not be run on single core targets */ | 1576 | * for each core by CPU, let the remainder proceed in parallel and |
2050 | /* Wait for other processors to finish their inits since create_thread | 1577 | * signal CPU when all are finished. |
2051 | * isn't safe to call until the kernel inits are done. The first | 1578 | * |
2052 | * threads created in the system must of course be created by CPU. | 1579 | * Other: |
2053 | * Another possible approach is to initialize all cores and slots | 1580 | * After last processor completes, it should signal all others to |
2054 | * for each core by CPU, let the remainder proceed in parallel and | 1581 | * proceed or may signal the next and call thread_exit(). The last one |
2055 | * signal CPU when all are finished. */ | 1582 | * to finish will signal CPU. |
2056 | core_thread_init(CPU); | 1583 | */ |
2057 | } | 1584 | core_thread_init(core); |
2058 | else | 1585 | |
1586 | if (core != CPU) | ||
2059 | { | 1587 | { |
2060 | /* Initial stack is the idle stack */ | 1588 | /* No main thread on coprocessors - go idle and wait */ |
2061 | thread->stack = idle_stacks[core]; | 1589 | switch_thread(); |
2062 | thread->stack_size = IDLE_STACK_SIZE; | 1590 | THREAD_PANICF("init_threads() - coprocessor returned", NULL); |
2063 | /* After last processor completes, it should signal all others to | ||
2064 | * proceed or may signal the next and call thread_exit(). The last one | ||
2065 | * to finish will signal CPU. */ | ||
2066 | core_thread_init(core); | ||
2067 | /* Other cores do not have a main thread - go idle inside switch_thread | ||
2068 | * until a thread can run on the core. */ | ||
2069 | thread_exit(); | ||
2070 | #endif /* NUM_CORES */ | ||
2071 | } | 1591 | } |
2072 | #ifdef INIT_MAIN_THREAD | 1592 | #endif /* NUM_CORES */ |
2073 | init_main_thread(&thread->context); | ||
2074 | #endif | ||
2075 | } | ||
2076 | |||
2077 | /* Unless otherwise defined, do nothing */ | ||
2078 | #ifndef YIELD_KERNEL_HOOK | ||
2079 | #define YIELD_KERNEL_HOOK() false | ||
2080 | #endif | ||
2081 | #ifndef SLEEP_KERNEL_HOOK | ||
2082 | #define SLEEP_KERNEL_HOOK(ticks) false | ||
2083 | #endif | ||
2084 | |||
2085 | /*--------------------------------------------------------------------------- | ||
2086 | * Suspends a thread's execution for at least the specified number of ticks. | ||
2087 | * | ||
2088 | * May result in CPU core entering wait-for-interrupt mode if no other thread | ||
2089 | * may be scheduled. | ||
2090 | * | ||
2091 | * NOTE: sleep(0) sleeps until the end of the current tick | ||
2092 | * sleep(n) that doesn't result in rescheduling: | ||
2093 | * n <= ticks suspended < n + 1 | ||
2094 | * n to n+1 is a lower bound. Other factors may affect the actual time | ||
2095 | * a thread is suspended before it runs again. | ||
2096 | *--------------------------------------------------------------------------- | ||
2097 | */ | ||
2098 | unsigned sleep(unsigned ticks) | ||
2099 | { | ||
2100 | /* In certain situations, certain bootloaders in particular, a normal | ||
2101 | * threading call is inappropriate. */ | ||
2102 | if (SLEEP_KERNEL_HOOK(ticks)) | ||
2103 | return 0; /* Handled */ | ||
2104 | |||
2105 | disable_irq(); | ||
2106 | sleep_thread(ticks); | ||
2107 | switch_thread(); | ||
2108 | return 0; | ||
2109 | } | ||
2110 | |||
2111 | /*--------------------------------------------------------------------------- | ||
2112 | * Elects another thread to run or, if no other thread may be made ready to | ||
2113 | * run, immediately returns control back to the calling thread. | ||
2114 | *--------------------------------------------------------------------------- | ||
2115 | */ | ||
2116 | void yield(void) | ||
2117 | { | ||
2118 | /* In certain situations, certain bootloaders in particular, a normal | ||
2119 | * threading call is inappropriate. */ | ||
2120 | if (YIELD_KERNEL_HOOK()) | ||
2121 | return; /* handled */ | ||
2122 | |||
2123 | switch_thread(); | ||
2124 | } | 1593 | } |
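
The removed sleep() and yield() wrappers spell out the tick semantics: sleep(0) runs to the end of the current tick, and sleep(n) suspends for at least n but less than n + 1 ticks, with other scheduling effects only ever adding to that lower bound. A small polling sketch under those semantics - device_ready() is an invented predicate and HZ is the kernel tick rate:

    /* Poll a condition roughly every 10 ms without hogging the CPU
     * (assumes the usual HZ = 100 tick rate, so HZ / 100 is one tick). */
    static void wait_for_device(void)
    {
        while (!device_ready())
            sleep(HZ / 100);
    }

Where no delay is wanted at all, yield() is the lighter option: it only gives other ready threads a chance to run and returns immediately if there are none.
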