author     Michael Sevakis <jethead71@rockbox.org>  2008-03-25 02:34:12 +0000
committer  Michael Sevakis <jethead71@rockbox.org>  2008-03-25 02:34:12 +0000
commit     27cf67733936abd75fcb1f8da765977cd75906ee (patch)
tree       f894211a8a0c77b402dd3250b2bee2d17dcfe13f
parent     bc2f8fd8f38a3e010cd67bbac358f6e9991153c6 (diff)
download   rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.tar.gz
           rockbox-27cf67733936abd75fcb1f8da765977cd75906ee.zip
Add a complete priority inheritance implementation to the scheduler (all mutex ownership and queue_send calls are inheritable). Priorities are differential so that dispatch depends on the runnable range of priorities. Codec priority can therefore be raised in small steps (pcmbuf updated to enable). Simplify the kernel functions to ease implementation and use the same kernel.c for both sim and target (I'm tired of maintaining two ;_).

1) Not sure if a minor audio break at first buffering issue will exist on large-sector disks (the main mutex speed issue was genuinely resolved earlier). At this point it's best dealt with at the buffering level. It seems a larger filechunk could be used again.

2) Perhaps 64-bit sims will have some minor issues (finicky) but a backroll of the code of concern there is a 5-minute job.

All kernel objects become incompatible so a full rebuild and update is needed.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16791 a1c6a512-1295-4272-9138-f99709370657
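The core idea, for readers coming to this commit cold: when a high-priority thread blocks on an object owned by a lower-priority thread, the owner temporarily runs at the waiter's priority so it cannot be starved by medium-priority work in between. Below is a minimal single-core sketch of that rule only; the toy_* names are invented and lower numbers mean higher priority, matching the new thread.h constants.

    #include <stdio.h>

    struct toy_thread { int base_priority; int priority; };
    struct toy_mutex  { struct toy_thread *owner; };

    static void toy_lock(struct toy_mutex *m, struct toy_thread *waiter)
    {
        /* Waiter is more urgent than the owner: owner inherits its priority */
        if (m->owner != NULL && waiter->priority < m->owner->priority)
            m->owner->priority = waiter->priority;
        /* ...the real kernel would now block the waiter on the mutex queue... */
    }

    static void toy_unlock(struct toy_mutex *m)
    {
        /* Disinherit; the real kernel hands ownership to the best waiter */
        m->owner->priority = m->owner->base_priority;
        m->owner = NULL;
    }

    int main(void)
    {
        struct toy_thread ui = { 16, 16 }, codec = { 16, 5 };
        struct toy_mutex m = { &ui };

        toy_lock(&m, &codec);                       /* codec waits on ui's lock */
        printf("ui now runs at %d\n", ui.priority); /* prints 5 */
        toy_unlock(&m);
        return 0;
    }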
-rw-r--r--  apps/buffering.c                                        9
-rw-r--r--  apps/codecs.c                                          36
-rw-r--r--  apps/codecs.h                                          52
-rw-r--r--  apps/debug_menu.c                                      11
-rw-r--r--  apps/main.c                                             2
-rw-r--r--  apps/pcmbuf.c                                          23
-rw-r--r--  apps/playback.c                                        10
-rw-r--r--  apps/plugin.c                                           5
-rw-r--r--  apps/plugin.h                                          14
-rw-r--r--  apps/plugins/mpegplayer/audio_thread.c                  6
-rw-r--r--  apps/plugins/mpegplayer/disk_buf.c                      4
-rw-r--r--  apps/plugins/mpegplayer/stream_mgr.c                    4
-rw-r--r--  apps/plugins/mpegplayer/video_thread.c                  6
-rw-r--r--  apps/voice_thread.c                                     4
-rw-r--r--  firmware/SOURCES                                       12
-rw-r--r--  firmware/common/ffs.c                                  54
-rw-r--r--  firmware/drivers/ata.c                                 50
-rw-r--r--  firmware/drivers/fat.c                                  6
-rw-r--r--  firmware/export/config.h                               14
-rw-r--r--  firmware/export/kernel.h                               94
-rw-r--r--  firmware/export/system.h                               14
-rw-r--r--  firmware/export/thread.h                              371
-rw-r--r--  firmware/kernel.c                                    1329
-rw-r--r--  firmware/pcm_record.c                                   7
-rw-r--r--  firmware/target/arm/ffs-arm.S                          74
-rw-r--r--  firmware/target/arm/i2c-pp.c                            2
-rw-r--r--  firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c    2
-rw-r--r--  firmware/target/arm/sandisk/ata-c200_e200.c             4
-rw-r--r--  firmware/target/coldfire/ffs-coldfire.S                62
-rw-r--r--  firmware/thread.c                                    2535
-rw-r--r--  uisimulator/sdl/SOURCES                                 2
-rw-r--r--  uisimulator/sdl/kernel-sdl.c                          168
-rw-r--r--  uisimulator/sdl/kernel.c                              739
-rw-r--r--  uisimulator/sdl/system-sdl.h                            2
-rw-r--r--  uisimulator/sdl/thread-sdl.c                          372
-rw-r--r--  uisimulator/sdl/uisdl.c                                38
36 files changed, 3079 insertions, 3058 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index 64f522c52f..0cb428c947 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -1446,16 +1446,21 @@ void buffering_thread(void)
 
 void buffering_init(void) {
     mutex_init(&llist_mutex);
+#ifdef HAVE_PRIORITY_SCHEDULING
+    /* This behavior not safe atm */
+    mutex_set_preempt(&llist_mutex, false);
+#endif
 
     conf_watermark = BUFFERING_DEFAULT_WATERMARK;
 
     queue_init(&buffering_queue, true);
-    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list);
-
     buffering_thread_p = create_thread( buffering_thread, buffering_stack,
             sizeof(buffering_stack), CREATE_THREAD_FROZEN,
             buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
             IF_COP(, CPU));
+
+    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
+                            buffering_thread_p);
 }
 
 /* Initialise the buffering subsystem */
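The signature change above is the pattern repeated throughout this commit: queue_enable_queue_send() now takes the thread that services the queue, so a blocked queue_send() knows which thread should inherit its priority. Registration therefore moves after create_thread(), once the thread pointer exists. A generic sketch of the new ordering, with my_* placeholders standing in for a real module's objects:

    static struct event_queue my_queue;
    static struct queue_sender_list my_queue_sender_list;
    static struct thread_entry *my_thread_p;
    static long my_stack[DEFAULT_STACK_SIZE/sizeof(long)];
    static const char my_thread_name[] = "my module";
    static void my_thread(void);

    void my_module_init(void)
    {
        queue_init(&my_queue, true);

        /* Create the servicing thread first... */
        my_thread_p = create_thread(my_thread, my_stack, sizeof(my_stack),
                                    CREATE_THREAD_FROZEN,
                                    my_thread_name IF_PRIO(, PRIORITY_SYSTEM)
                                    IF_COP(, CPU));

        /* ...then register it as the queue's owner for priority inheritance */
        queue_enable_queue_send(&my_queue, &my_queue_sender_list, my_thread_p);
    }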
diff --git a/apps/codecs.c b/apps/codecs.c
index dfae463865..f2c74522cc 100644
--- a/apps/codecs.c
+++ b/apps/codecs.c
@@ -76,6 +76,7 @@ struct codec_api ci = {
     false,  /* stop_codec */
     0,  /* new_track */
     0,  /* seek_time */
+    NULL, /* struct dsp_config *dsp */
     NULL, /* get_codec_memory */
     NULL, /* pcmbuf_insert */
     NULL, /* set_elapsed */
@@ -95,6 +96,23 @@ struct codec_api ci = {
     PREFIX(sleep),
     yield,
 
+#if NUM_CORES > 1
+    create_thread,
+    thread_thaw,
+    thread_wait,
+    semaphore_init,
+    semaphore_wait,
+    semaphore_release,
+    event_init,
+    event_wait,
+    event_set_state,
+#endif
+
+#ifdef CACHE_FUNCTIONS_AS_CALL
+    flush_icache,
+    invalidate_icache,
+#endif
+
     /* strings and memory */
     strcpy,
     strncpy,
@@ -147,24 +165,6 @@ struct codec_api ci = {
     /* new stuff at the end, sort into place next time
        the API gets incompatible */
 
-#ifdef CACHE_FUNCTIONS_AS_CALL
-    flush_icache,
-    invalidate_icache,
-#endif
-
-    NULL, /* struct dsp_config *dsp */
-
-#if NUM_CORES > 1
-    create_thread,
-    thread_thaw,
-    thread_wait,
-    semaphore_init,
-    semaphore_wait,
-    semaphore_release,
-    event_init,
-    event_wait,
-    event_set_state,
-#endif
 };
 
 void codec_get_full_path(char *path, const char *codec_root_fn)
diff --git a/apps/codecs.h b/apps/codecs.h
index ad6b831b61..fb5675fd84 100644
--- a/apps/codecs.h
+++ b/apps/codecs.h
@@ -80,12 +80,12 @@
 #define CODEC_ENC_MAGIC 0x52454E43 /* RENC */
 
 /* increase this every time the api struct changes */
-#define CODEC_API_VERSION 22
+#define CODEC_API_VERSION 23
 
 /* update this to latest version if a change to the api struct breaks
    backwards compatibility (and please take the opportunity to sort in any
    new function which are "waiting" at the end of the function table) */
-#define CODEC_MIN_API_VERSION 22
+#define CODEC_MIN_API_VERSION 23
 
 /* codec return codes */
 enum codec_status {
@@ -118,6 +118,9 @@ struct codec_api {
     /* If seek_time != 0, codec should seek to that song position (in ms)
        if codec supports seeking. */
     long seek_time;
+
+    /* The dsp instance to be used for audio output */
+    struct dsp_config *dsp;
 
     /* Returns buffer to malloc array. Only codeclib should need this. */
     void* (*get_codec_memory)(size_t *size);
@@ -160,6 +163,28 @@ struct codec_api {
     void (*PREFIX(sleep))(int ticks);
     void (*yield)(void);
 
+#if NUM_CORES > 1
+    struct thread_entry *
+        (*create_thread)(void (*function)(void), void* stack,
+                         size_t stack_size, unsigned flags, const char *name
+                         IF_PRIO(, int priority)
+                         IF_COP(, unsigned int core));
+
+    void (*thread_thaw)(struct thread_entry *thread);
+    void (*thread_wait)(struct thread_entry *thread);
+    void (*semaphore_init)(struct semaphore *s, int max, int start);
+    void (*semaphore_wait)(struct semaphore *s);
+    void (*semaphore_release)(struct semaphore *s);
+    void (*event_init)(struct event *e, unsigned int flags);
+    void (*event_wait)(struct event *e, unsigned int for_state);
+    void (*event_set_state)(struct event *e, unsigned int state);
+#endif /* NUM_CORES */
+
+#ifdef CACHE_FUNCTIONS_AS_CALL
+    void (*flush_icache)(void);
+    void (*invalidate_icache)(void);
+#endif
+
     /* strings and memory */
     char* (*strcpy)(char *dst, const char *src);
     char* (*strncpy)(char *dst, const char *src, size_t length);
@@ -218,29 +243,6 @@ struct codec_api {
     /* new stuff at the end, sort into place next time
        the API gets incompatible */
 
-#ifdef CACHE_FUNCTIONS_AS_CALL
-    void (*flush_icache)(void);
-    void (*invalidate_icache)(void);
-#endif
-
-    struct dsp_config *dsp;
-
-#if NUM_CORES > 1
-    struct thread_entry *
-        (*create_thread)(void (*function)(void), void* stack,
-                         int stack_size, unsigned flags, const char *name
-                         IF_PRIO(, int priority)
-                         IF_COP(, unsigned int core));
-
-    void (*thread_thaw)(struct thread_entry *thread);
-    void (*thread_wait)(struct thread_entry *thread);
-    void (*semaphore_init)(struct semaphore *s, int max, int start);
-    void (*semaphore_wait)(struct semaphore *s);
-    void (*semaphore_release)(struct semaphore *s);
-    void (*event_init)(struct event *e, unsigned int flags);
-    void (*event_wait)(struct event *e, unsigned int for_state);
-    void (*event_set_state)(struct event *e, unsigned int state);
-#endif /* NUM_CORES */
 };
 
 /* codec header */
diff --git a/apps/debug_menu.c b/apps/debug_menu.c
index d865f12e65..fc509ce236 100644
--- a/apps/debug_menu.c
+++ b/apps/debug_menu.c
@@ -127,11 +127,6 @@ static char thread_status_char(unsigned status)
         [STATE_KILLED]      = 'K',
     };
 
-#if NUM_CORES > 1
-    if (status == STATE_BUSY) /* Not a state index */
-        return '.';
-#endif
-
     if (status > THREAD_NUM_STATES)
         status = THREAD_NUM_STATES;
 
@@ -166,15 +161,15 @@ static char* threads_getname(int selected_item, void * data, char *buffer)
     thread_get_name(name, 32, thread);
 
     snprintf(buffer, MAX_PATH,
-             "%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d ") "%2d%% %s",
+             "%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d %d ") "%2d%% %s",
              selected_item,
              IF_COP(thread->core,)
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
-             (thread->boosted) ? '+' :
+             (thread->cpu_boost) ? '+' :
 #endif
              ((thread->state == STATE_RUNNING) ? '*' : ' '),
              thread_status_char(thread->state),
-             IF_PRIO(thread->priority,)
+             IF_PRIO(thread->base_priority, thread->priority, )
              thread_stack_usage(thread), name);
 
     return buffer;
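With the extra IF_PRIO field the threads debug screen now shows both the base and the currently scheduled priority, which makes inheritance and the codec boost visible at runtime. With the format string above, a boosted codec thread might print roughly as follows (the values and the status letter are illustrative only):

     3: (0) +R 16 5 47% codec

i.e. slot 3, core 0, CPU-boosted ('+'), status character, base priority 16, scheduled priority 5, 47% stack usage, thread name.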
diff --git a/apps/main.c b/apps/main.c
index 5dd92e5e02..a3a2241f44 100644
--- a/apps/main.c
+++ b/apps/main.c
@@ -270,7 +270,7 @@ static void init_tagcache(void)
 
 static void init(void)
 {
-    init_threads();
+    kernel_init();
     buffer_init();
     set_irq_level(0);
     lcd_init();
diff --git a/apps/pcmbuf.c b/apps/pcmbuf.c
index 8153118715..8f16c90523 100644
--- a/apps/pcmbuf.c
+++ b/apps/pcmbuf.c
@@ -116,7 +116,7 @@ static bool low_latency_mode = false;
 static bool pcmbuf_flush;
 
 #ifdef HAVE_PRIORITY_SCHEDULING
-static int codec_thread_priority = 0;
+static int codec_thread_priority = PRIORITY_PLAYBACK;
 #endif
 
 extern struct thread_entry *codec_thread_p;
@@ -256,18 +256,21 @@ static void boost_codec_thread(bool boost)
      * will starve if the codec thread's priority is boosted. */
     if (boost)
     {
-        if (codec_thread_priority == 0)
+        int priority = (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)*pcmbuf_unplayed_bytes
+                        / (2*NATIVE_FREQUENCY) + PRIORITY_PLAYBACK_MAX;
+
+        if (priority != codec_thread_priority)
         {
-            codec_thread_priority = thread_set_priority(
-                codec_thread_p, PRIORITY_REALTIME);
-            voice_thread_set_priority(PRIORITY_REALTIME);
+            codec_thread_priority = priority;
+            thread_set_priority(codec_thread_p, priority);
+            voice_thread_set_priority(priority);
         }
     }
-    else if (codec_thread_priority != 0)
+    else if (codec_thread_priority != PRIORITY_PLAYBACK)
     {
-        thread_set_priority(codec_thread_p, codec_thread_priority);
-        voice_thread_set_priority(codec_thread_priority);
-        codec_thread_priority = 0;
+        thread_set_priority(codec_thread_p, PRIORITY_PLAYBACK);
+        voice_thread_set_priority(PRIORITY_PLAYBACK);
+        codec_thread_priority = PRIORITY_PLAYBACK;
     }
 }
 #endif /* HAVE_PRIORITY_SCHEDULING */
@@ -818,7 +821,7 @@ static bool prepare_insert(size_t length)
     if (low_latency_mode)
     {
         /* 1/4s latency. */
-        if (pcmbuf_unplayed_bytes > NATIVE_FREQUENCY * 4 / 4
+        if (pcmbuf_unplayed_bytes > NATIVE_FREQUENCY * 4 / 2
             && pcm_is_playing())
             return false;
     }
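The boost is no longer a single jump to a realtime priority; it scales linearly with how empty the PCM buffer is, stepping from PRIORITY_PLAYBACK (16) toward PRIORITY_PLAYBACK_MAX (5) as pcmbuf_unplayed_bytes shrinks. A standalone rendering of just that arithmetic, assuming NATIVE_FREQUENCY is 44100 as on most targets (this is not Rockbox code, only the formula from boost_codec_thread() above):

    #include <stddef.h>
    #include <stdio.h>

    #define NATIVE_FREQUENCY      44100
    #define PRIORITY_PLAYBACK     16
    #define PRIORITY_PLAYBACK_MAX 5

    int main(void)
    {
        size_t levels[] = { 0, 22050, 44100, 88200 }; /* unplayed bytes */

        for (unsigned i = 0; i < sizeof(levels)/sizeof(levels[0]); i++)
        {
            int priority = (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)*(int)levels[i]
                            / (2*NATIVE_FREQUENCY) + PRIORITY_PLAYBACK_MAX;
            printf("%6zu unplayed bytes -> priority %d\n", levels[i], priority);
        }
        return 0;
    }

An empty buffer drives the codec to priority 5 (the strongest allowed boost), 88200 unplayed bytes (2*NATIVE_FREQUENCY) leave it at its normal 16, and intermediate fill levels land in between, so the codec is nudged in small steps rather than slammed to a realtime priority.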
diff --git a/apps/playback.c b/apps/playback.c
index 7eecd23e35..9005b3485c 100644
--- a/apps/playback.c
+++ b/apps/playback.c
@@ -2549,9 +2549,7 @@ void audio_init(void)
        to send messages. Thread creation will be delayed however so nothing
        starts running until ready if something yields such as talk_init. */
     queue_init(&audio_queue, true);
-    queue_enable_queue_send(&audio_queue, &audio_queue_sender_list);
     queue_init(&codec_queue, false);
-    queue_enable_queue_send(&codec_queue, &codec_queue_sender_list);
     queue_init(&pcmbuf_queue, false);
 
     pcm_init();
@@ -2587,11 +2585,17 @@
                                   codec_thread_name IF_PRIO(, PRIORITY_PLAYBACK)
                                   IF_COP(, CPU));
 
+    queue_enable_queue_send(&codec_queue, &codec_queue_sender_list,
+                            codec_thread_p);
+
     audio_thread_p = create_thread(audio_thread, audio_stack,
                                    sizeof(audio_stack), CREATE_THREAD_FROZEN,
-                                   audio_thread_name IF_PRIO(, PRIORITY_SYSTEM)
+                                   audio_thread_name IF_PRIO(, PRIORITY_USER_INTERFACE)
                                    IF_COP(, CPU));
 
+    queue_enable_queue_send(&audio_queue, &audio_queue_sender_list,
+                            audio_thread_p);
+
 #ifdef PLAYBACK_VOICE
     voice_thread_init();
 #endif
diff --git a/apps/plugin.c b/apps/plugin.c
index 57f836c5d2..db9bd2574a 100644
--- a/apps/plugin.c
+++ b/apps/plugin.c
@@ -253,15 +253,12 @@ static const struct plugin_api rockbox_api = {
     /* kernel/ system */
     PREFIX(sleep),
     yield,
-#ifdef HAVE_PRIORITY_SCHEDULING
-    priority_yield,
-#endif
     &current_tick,
     default_event_handler,
     default_event_handler_ex,
     threads,
     create_thread,
-    remove_thread,
+    thread_exit,
     thread_wait,
 #if (CONFIG_CODEC == SWCODEC)
     mutex_init,
diff --git a/apps/plugin.h b/apps/plugin.h
index cd426564ba..57624739c7 100644
--- a/apps/plugin.h
+++ b/apps/plugin.h
@@ -119,12 +119,12 @@
 #define PLUGIN_MAGIC 0x526F634B /* RocK */
 
 /* increase this every time the api struct changes */
-#define PLUGIN_API_VERSION 100
+#define PLUGIN_API_VERSION 101
 
 /* update this to latest version if a change to the api struct breaks
    backwards compatibility (and please take the opportunity to sort in any
    new function which are "waiting" at the end of the function table) */
-#define PLUGIN_MIN_API_VERSION 100
+#define PLUGIN_MIN_API_VERSION 101
 
 /* plugin return codes */
 enum plugin_status {
@@ -351,19 +351,16 @@ struct plugin_api {
     /* kernel/ system */
     void (*PREFIX(sleep))(int ticks);
     void (*yield)(void);
-#ifdef HAVE_PRIORITY_SCHEDULING
-    void (*priority_yield)(void);
-#endif
     volatile long* current_tick;
     long (*default_event_handler)(long event);
     long (*default_event_handler_ex)(long event, void (*callback)(void *), void *parameter);
     struct thread_entry* threads;
     struct thread_entry* (*create_thread)(void (*function)(void), void* stack,
-                                          int stack_size, unsigned flags,
+                                          size_t stack_size, unsigned flags,
                                           const char *name
                                           IF_PRIO(, int priority)
                                           IF_COP(, unsigned int core));
-    void (*remove_thread)(struct thread_entry *thread);
+    void (*thread_exit)(void);
     void (*thread_wait)(struct thread_entry *thread);
 #if CONFIG_CODEC == SWCODEC
     void (*mutex_init)(struct mutex *m);
@@ -405,7 +402,8 @@ struct plugin_api {
                               int ticks);
 #if CONFIG_CODEC == SWCODEC
     void (*queue_enable_queue_send)(struct event_queue *q,
-                                    struct queue_sender_list *send);
+                                    struct queue_sender_list *send,
+                                    struct thread_entry *owner);
     bool (*queue_empty)(const struct event_queue *q);
     void (*queue_wait)(struct event_queue *q, struct queue_event *ev);
     intptr_t (*queue_send)(struct event_queue *q, long id,
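For plugin code the visible consequences are that priority_yield() is gone (a plain yield() now behaves sensibly because lower-priority threads age toward the running range on their own) and remove_thread() is replaced by thread_exit(), which a thread calls on itself when it is done. A migration sketch; my_worker, quit and do_some_work are placeholders:

    static volatile bool quit = false;

    static void my_worker(void)
    {
        while (!quit)
        {
            do_some_work();
            rb->yield();       /* was rb->priority_yield() on priority builds */
        }

        rb->thread_exit();     /* was: another thread calling rb->remove_thread() */
    }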
diff --git a/apps/plugins/mpegplayer/audio_thread.c b/apps/plugins/mpegplayer/audio_thread.c
index 2bb766ad88..7d2f849a44 100644
--- a/apps/plugins/mpegplayer/audio_thread.c
+++ b/apps/plugins/mpegplayer/audio_thread.c
@@ -714,12 +714,14 @@ bool audio_thread_init(void)
     /* Start the audio thread */
     audio_str.hdr.q = &audio_str_queue;
     rb->queue_init(audio_str.hdr.q, false);
-    rb->queue_enable_queue_send(audio_str.hdr.q, &audio_str_queue_send);
 
     /* One-up on the priority since the core DSP over-yields internally */
     audio_str.thread = rb->create_thread(
         audio_thread, audio_stack, audio_stack_size, 0,
-        "mpgaudio" IF_PRIO(,PRIORITY_PLAYBACK-1) IF_COP(, CPU));
+        "mpgaudio" IF_PRIO(,PRIORITY_PLAYBACK-4) IF_COP(, CPU));
+
+    rb->queue_enable_queue_send(audio_str.hdr.q, &audio_str_queue_send,
+                                audio_str.thread);
 
     if (audio_str.thread == NULL)
         return false;
diff --git a/apps/plugins/mpegplayer/disk_buf.c b/apps/plugins/mpegplayer/disk_buf.c
index a408b90a67..289918fc63 100644
--- a/apps/plugins/mpegplayer/disk_buf.c
+++ b/apps/plugins/mpegplayer/disk_buf.c
@@ -837,7 +837,6 @@ bool disk_buf_init(void)
 
     disk_buf.q = &disk_buf_queue;
     rb->queue_init(disk_buf.q, false);
-    rb->queue_enable_queue_send(disk_buf.q, &disk_buf_queue_send);
 
     disk_buf.state = TSTATE_EOS;
     disk_buf.status = STREAM_STOPPED;
@@ -886,6 +885,9 @@ bool disk_buf_init(void)
         disk_buf_thread, disk_buf_stack, sizeof(disk_buf_stack), 0,
         "mpgbuffer" IF_PRIO(, PRIORITY_BUFFERING) IF_COP(, CPU));
 
+    rb->queue_enable_queue_send(disk_buf.q, &disk_buf_queue_send,
+                                disk_buf.thread);
+
     if (disk_buf.thread == NULL)
         return false;
 
diff --git a/apps/plugins/mpegplayer/stream_mgr.c b/apps/plugins/mpegplayer/stream_mgr.c
index 9da664effe..b962c5b993 100644
--- a/apps/plugins/mpegplayer/stream_mgr.c
+++ b/apps/plugins/mpegplayer/stream_mgr.c
@@ -987,7 +987,6 @@ int stream_init(void)
 
     stream_mgr.q = &stream_mgr_queue;
     rb->queue_init(stream_mgr.q, false);
-    rb->queue_enable_queue_send(stream_mgr.q, &stream_mgr_queue_send);
 
     /* sets audiosize and returns buffer pointer */
     mem = rb->plugin_get_audio_buffer(&memsize);
@@ -1028,6 +1027,9 @@ int stream_init(void)
         stream_mgr_thread_stack, sizeof(stream_mgr_thread_stack),
         0, "mpgstream_mgr" IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));
 
+    rb->queue_enable_queue_send(stream_mgr.q, &stream_mgr_queue_send,
+                                stream_mgr.thread);
+
     if (stream_mgr.thread == NULL)
     {
         rb->splash(HZ, "Could not create stream manager thread!");
diff --git a/apps/plugins/mpegplayer/video_thread.c b/apps/plugins/mpegplayer/video_thread.c
index 6508d28d1d..d16eb771b0 100644
--- a/apps/plugins/mpegplayer/video_thread.c
+++ b/apps/plugins/mpegplayer/video_thread.c
@@ -955,7 +955,7 @@ static void video_thread(void)
         else
         {
             /* Just a little left - spin and be accurate */
-            rb->priority_yield();
+            rb->yield();
             if (str_have_msg(&video_str))
                 goto message_wait;
         }
@@ -998,13 +998,15 @@ bool video_thread_init(void)
 
     video_str.hdr.q = &video_str_queue;
     rb->queue_init(video_str.hdr.q, false);
-    rb->queue_enable_queue_send(video_str.hdr.q, &video_str_queue_send);
 
     /* We put the video thread on another processor for multi-core targets. */
     video_str.thread = rb->create_thread(
         video_thread, video_stack, VIDEO_STACKSIZE, 0,
         "mpgvideo" IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, COP));
 
+    rb->queue_enable_queue_send(video_str.hdr.q, &video_str_queue_send,
+                                video_str.thread);
+
     if (video_str.thread == NULL)
         return false;
 
diff --git a/apps/voice_thread.c b/apps/voice_thread.c
index 7bf52d4d0d..6e70f43cc5 100644
--- a/apps/voice_thread.c
+++ b/apps/voice_thread.c
@@ -424,12 +424,14 @@ void voice_thread_init(void)
 {
     logf("Starting voice thread");
     queue_init(&voice_queue, false);
-    queue_enable_queue_send(&voice_queue, &voice_queue_sender_list);
     mutex_init(&voice_mutex);
     event_init(&voice_event, STATE_SIGNALED | EVENT_MANUAL);
     voice_thread_p = create_thread(voice_thread, voice_stack,
             sizeof(voice_stack), CREATE_THREAD_FROZEN,
             voice_thread_name IF_PRIO(, PRIORITY_PLAYBACK) IF_COP(, CPU));
+
+    queue_enable_queue_send(&voice_queue, &voice_queue_sender_list,
+                            voice_thread_p);
 } /* voice_thread_init */
 
 /* Unfreeze the voice thread */
diff --git a/firmware/SOURCES b/firmware/SOURCES
index 6ef129f4b9..0a8ac2a8e3 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -9,11 +9,11 @@ usb.c
 #ifdef ROCKBOX_HAS_LOGF
 logf.c
 #endif /* ROCKBOX_HAS_LOGF */
+kernel.c
 #ifndef SIMULATOR
 #ifdef RB_PROFILE
 profile.c
 #endif /* RB_PROFILE */
-kernel.c
 rolo.c
 thread.c
 timer.c
@@ -274,6 +274,10 @@ target/sh/archos/descramble.S
 
 #ifndef SIMULATOR
 target/coldfire/crt0.S
+#ifdef HAVE_PRIORITY_SCHEDULING
+common/ffs.c
+target/coldfire/ffs-coldfire.S
+#endif
 target/coldfire/memcpy-coldfire.S
 target/coldfire/memmove-coldfire.S
 target/coldfire/memset-coldfire.S
@@ -299,6 +303,9 @@ common/strlen.c
 #ifndef SIMULATOR
 target/arm/memset-arm.S
 target/arm/memset16-arm.S
+#ifdef HAVE_PRIORITY_SCHEDULING
+target/arm/ffs-arm.S
+#endif
 #if CONFIG_I2C == I2C_PP5024 || CONFIG_I2C == I2C_PP5020 || CONFIG_I2C == I2C_PP5002
 target/arm/i2c-pp.c
 #elif CONFIG_I2C == I2C_PNX0101
@@ -345,6 +352,9 @@ target/arm/crt0.S
 
 #else
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+common/ffs.c
+#endif
 common/memcpy.c
 common/memmove.c
 common/memset.c
diff --git a/firmware/common/ffs.c b/firmware/common/ffs.c
new file mode 100644
index 0000000000..e3dc9b0dc5
--- /dev/null
+++ b/firmware/common/ffs.c
@@ -0,0 +1,54 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Michael Sevakis
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+#include <inttypes.h>
+
+/* find_first_set_bit() - this is a C version of the ffs algorithm devised
+ * by D.Seal and posted to comp.sys.arm on 16 Feb 1994.
+ *
+ * Find the index of the least significant set bit in the word.
+ * return values:
+ *   0  - bit 0 is set
+ *   1  - bit 1 is set
+ *   ...
+ *   31 - bit 31 is set
+ *   32 - no bits set
+ */
+
+/* Table shared with assembly code */
+const uint8_t L_ffs_table[64] ICONST_ATTR =
+{
+/*   0   1   2   3   4   5   6   7           */
+/*  -----------------------------------------*/
+    32,  0,  1, 12,  2,  6,  0, 13,  /*  0- 7 */
+     3,  0,  7,  0,  0,  0,  0, 14,  /*  8-15 */
+    10,  4,  0,  0,  8,  0,  0, 25,  /* 16-23 */
+     0,  0,  0,  0,  0, 21, 27, 15,  /* 24-31 */
+    31, 11,  5,  0,  0,  0,  0,  0,  /* 32-39 */
+     9,  0,  0, 24,  0,  0, 20, 26,  /* 40-47 */
+    30,  0,  0,  0,  0, 23,  0, 19,  /* 48-55 */
+    29,  0, 22, 18, 28, 17, 16,  0,  /* 56-63 */
+};
+
+#if !defined(CPU_COLDFIRE)
+int find_first_set_bit(uint32_t val)
+{
+    return L_ffs_table[((val & -val)*0x0450fbaf) >> 26];
+}
+#endif
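The single multiply plus 64-entry table is the classic trick: val & -val isolates the lowest set bit, and multiplying by 0x0450fbaf places a distinct value in the top six bits of the product for every possible isolated bit (and for zero), which then indexes the table. A standalone cross-check against a naive loop, not part of the commit:

    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t L_ffs_table[64] = {
        32,  0,  1, 12,  2,  6,  0, 13,   3,  0,  7,  0,  0,  0,  0, 14,
        10,  4,  0,  0,  8,  0,  0, 25,   0,  0,  0,  0,  0, 21, 27, 15,
        31, 11,  5,  0,  0,  0,  0,  0,   9,  0,  0, 24,  0,  0, 20, 26,
        30,  0,  0,  0,  0, 23,  0, 19,  29,  0, 22, 18, 28, 17, 16,  0,
    };

    static int ffs_table(uint32_t val)
    {
        return L_ffs_table[((val & -val)*0x0450fbafu) >> 26];
    }

    static int ffs_naive(uint32_t val)
    {
        for (int i = 0; i < 32; i++)
            if (val & (1u << i))
                return i;
        return 32;
    }

    int main(void)
    {
        for (int bit = 0; bit <= 32; bit++)
        {
            /* set a higher bit too, to show only the lowest one matters */
            uint32_t val = (bit < 32) ? (1u << bit) | 0x80000000u : 0;
            if (ffs_table(val) != ffs_naive(val))
                printf("mismatch at bit %d\n", bit);
        }
        printf("check done\n");
        return 0;
    }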
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index e067235d95..56b303da8d 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -95,52 +95,6 @@ static unsigned short identify_info[SECTOR_SIZE/2];
 
 #ifdef MAX_PHYS_SECTOR_SIZE
 
-/** This is temporary **/
-/* Define the mutex functions to use the special hack object */
-#define mutex_init ata_spin_init
-#define mutex_lock ata_spin_lock
-#define mutex_unlock ata_spin_unlock
-
-void ata_spin_init(struct mutex *m)
-{
-    m->thread = NULL;
-    m->locked = 0;
-    m->count = 0;
-#if CONFIG_CORELOCK == SW_CORELOCK
-    corelock_init(&m->cl);
-#endif
-}
-
-void ata_spin_lock(struct mutex *m)
-{
-    struct thread_entry *current = thread_get_current();
-
-    if (current == m->thread)
-    {
-        m->count++;
-        return;
-    }
-
-    while (test_and_set(&m->locked, 1, &m->cl))
-        yield();
-
-    m->thread = current;
-}
-
-void ata_spin_unlock(struct mutex *m)
-{
-    if (m->count > 0)
-    {
-        m->count--;
-        return;
-    }
-
-    m->thread = NULL;
-    test_and_set(&m->locked, 0, &m->cl);
-}
-
-/****/
-
 struct sector_cache_entry {
     bool inuse;
     unsigned long sectornum;  /* logical sector */
@@ -163,7 +117,7 @@ STATICIRAM int wait_for_bsy(void)
     long timeout = current_tick + HZ*30;
     while (TIME_BEFORE(current_tick, timeout) && (ATA_STATUS & STATUS_BSY)) {
         last_disk_activity = current_tick;
-        priority_yield();
+        yield();
     }
 
     if (TIME_BEFORE(current_tick, timeout))
@@ -185,7 +139,7 @@ STATICIRAM int wait_for_rdy(void)
     while (TIME_BEFORE(current_tick, timeout) &&
            !(ATA_ALT_STATUS & STATUS_RDY)) {
         last_disk_activity = current_tick;
-        priority_yield();
+        yield();
     }
 
     if (TIME_BEFORE(current_tick, timeout))
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c
index 8ae3b70cd3..a538b92695 100644
--- a/firmware/drivers/fat.c
+++ b/firmware/drivers/fat.c
@@ -259,6 +259,12 @@ void fat_init(void)
         mutex_init(&cache_mutex);
     }
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+    /* Disable this because it is dangerous due to the assumption that
+     * mutex_unlock won't yield */
+    mutex_set_preempt(&cache_mutex, false);
+#endif
+
     /* mark the FAT cache as unused */
     for(i = 0;i < FAT_CACHE_SIZE;i++)
     {
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 6a04504613..1a288dd590 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -371,10 +371,20 @@
 #endif
 
 /* define for all cpus from ARM family */
+#if (CONFIG_CPU == IMX31L)
+#define CPU_ARM
+#define ARM_ARCH 6 /* ARMv6 */
+#endif
+
+#if defined(CPU_TCC77X) || defined(CPU_TCC780X)
+#define CPU_ARM
+#define ARM_ARCH 5 /* ARMv5 */
+#endif
+
 #if defined(CPU_PP) || (CONFIG_CPU == PNX0101) || (CONFIG_CPU == S3C2440) \
-    || (CONFIG_CPU == DSC25) || (CONFIG_CPU == IMX31L) || (CONFIG_CPU == DM320) \
-    || defined(CPU_TCC77X) || defined(CPU_TCC780X)
+    || (CONFIG_CPU == DSC25) || (CONFIG_CPU == DM320)
 #define CPU_ARM
+#define ARM_ARCH 4 /* ARMv4 */
 #endif
 
 /* Determine if accesses should be strictly long aligned. */
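Splitting out ARM_ARCH lets code select instructions by architecture revision rather than by individual CPU. As a purely illustrative use (the actual ffs-arm.S added by this commit is not shown here, so this is an assumption about what such a define enables), an ARMv5+ build could compute find_first_set_bit() with the CLZ instruction instead of the C table lookup:

    /* Hypothetical ARM_ARCH-guarded variant; not taken from ffs-arm.S */
    #if defined(CPU_ARM) && ARM_ARCH >= 5
    static inline int find_first_set_bit(uint32_t val)
    {
        int leading;

        if (val == 0)
            return 32;

        /* CLZ on the isolated lowest bit turns "count leading zeros"
         * into that bit's index */
        asm("clz %0, %1" : "=r"(leading) : "r"(val & -val));
        return 31 - leading;
    }
    #endif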
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 70a2f98d59..78403c8b7d 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -76,6 +76,8 @@
 #define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
 #define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
 
+#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
+
 struct queue_event
 {
     long id;
@@ -87,68 +89,92 @@ struct queue_sender_list
 {
     /* If non-NULL, there is a thread waiting for the corresponding event */
     /* Must be statically allocated to put in non-cached ram. */
-    struct thread_entry *senders[QUEUE_LENGTH];
+    struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
+    struct thread_entry *list;                  /* list of senders in map */
     /* Send info for last message dequeued or NULL if replied or not sent */
     struct thread_entry *curr_sender;
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker blocker;
+#endif
 };
 #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define QUEUE_GET_THREAD(q) \
+    (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
+#else
+/* Queue without priority enabled have no owner provision _at this time_ */
+#define QUEUE_GET_THREAD(q) \
+    (NULL)
+#endif
+
 struct event_queue
 {
-    struct thread_queue queue;          /* Waiter list */
+    struct thread_entry *queue;         /* waiter list */
     struct queue_event events[QUEUE_LENGTH]; /* list of events */
    unsigned int read;                  /* head of queue */
     unsigned int write;                 /* tail of queue */
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     struct queue_sender_list *send;     /* list of threads waiting for
                                            reply to an event */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker *blocker_p;          /* priority inheritance info
+                                           for sync message senders */
 #endif
-#if NUM_CORES > 1
-    struct corelock cl;                 /* inter-core sync */
 #endif
+    IF_COP( struct corelock cl; )       /* multiprocessor sync */
 };
 
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define MUTEX_SET_THREAD(m, t) ((m)->blocker.thread = (t))
+#define MUTEX_GET_THREAD(m)    ((m)->blocker.thread)
+#else
+#define MUTEX_SET_THREAD(m, t) ((m)->thread = (t))
+#define MUTEX_GET_THREAD(m)    ((m)->thread)
+#endif
+
 struct mutex
 {
-    struct thread_entry *queue;         /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                 /* inter-core sync */
+    struct thread_entry *queue;         /* waiter list */
+    int count;                          /* lock owner recursion count */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    struct blocker blocker;             /* priority inheritance info
+                                           for waiters */
+    bool no_preempt;                    /* don't allow higher-priority thread
+                                           to be scheduled even if woken */
+#else
+    struct thread_entry *thread;
 #endif
-    struct thread_entry *thread;        /* thread that owns lock */
-    int count;                          /* lock owner recursion count */
+    IF_COP( struct corelock cl; )       /* multiprocessor sync */
     unsigned char locked;               /* locked semaphore */
 };
 
 #if NUM_CORES > 1
 struct spinlock
 {
-    struct corelock cl;                 /* inter-core sync */
     struct thread_entry *thread;        /* lock owner */
     int count;                          /* lock owner recursion count */
+    struct corelock cl;                 /* multiprocessor sync */
 };
 #endif
 
 #ifdef HAVE_SEMAPHORE_OBJECTS
 struct semaphore
 {
     struct thread_entry *queue;         /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                 /* inter-core sync */
-#endif
     int count;                          /* # of waits remaining before unsignaled */
     int max;                            /* maximum # of waits to remain signaled */
+    IF_COP( struct corelock cl; )       /* multiprocessor sync */
 };
 #endif
 
 #ifdef HAVE_EVENT_OBJECTS
 struct event
 {
     struct thread_entry *queues[2];     /* waiters for each state */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;                 /* inter-core sync */
-#endif
     unsigned char automatic;            /* event performs auto-reset */
     unsigned char state;                /* state: 1 = signaled */
+    IF_COP( struct corelock cl; )       /* multiprocessor sync */
 };
 #endif
 
@@ -208,7 +234,9 @@ extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
                              int ticks);
 extern void queue_post(struct event_queue *q, long id, intptr_t data);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
+extern void queue_enable_queue_send(struct event_queue *q,
+                                    struct queue_sender_list *send,
+                                    struct thread_entry *owner);
 extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
 extern void queue_reply(struct event_queue *q, intptr_t retval);
 extern bool queue_in_queue_send(struct event_queue *q);
@@ -223,6 +251,11 @@ extern int queue_broadcast(long id, intptr_t data);
 extern void mutex_init(struct mutex *m);
 extern void mutex_lock(struct mutex *m);
 extern void mutex_unlock(struct mutex *m);
+#ifdef HAVE_PRIORITY_SCHEDULING
+/* Temporary function to disable mutex preempting a thread on unlock */
+static inline void mutex_set_preempt(struct mutex *m, bool preempt)
+    { m->no_preempt = !preempt; }
+#endif
 #if NUM_CORES > 1
 extern void spinlock_init(struct spinlock *l);
 extern void spinlock_lock(struct spinlock *l);
@@ -240,6 +273,5 @@ extern void event_init(struct event *e, unsigned int flags);
 extern void event_wait(struct event *e, unsigned int for_state);
 extern void event_set_state(struct event *e, unsigned int state);
 #endif /* HAVE_EVENT_OBJECTS */
-#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
 
 #endif /* _KERNEL_H_ */
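Together with the new third argument to queue_enable_queue_send(), QUEUE_GET_THREAD() is what exposes a queue's servicing thread to the priority code: on HAVE_PRIORITY_SCHEDULING builds it returns the registered owner (via the sender list's blocker), otherwise it is always NULL and synchronous senders simply block without donating anything. A placeholder-named sketch:

    static struct event_queue q;
    static struct queue_sender_list q_send;

    void example_owner_setup(struct thread_entry *worker)
    {
        queue_init(&q, true);
        queue_enable_queue_send(&q, &q_send, worker);

        /* On a priority build QUEUE_GET_THREAD(&q) == worker from here on,
         * so a high-priority caller blocked in queue_send() can lend its
         * priority to 'worker' until the message is replied to. */
    }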
diff --git a/firmware/export/system.h b/firmware/export/system.h
index b973b57fd9..dc3853211b 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -159,6 +159,20 @@ int get_cpu_boost_counter(void);
 #define H_TO_BE32(x) (x)
 #endif
 
+/* Get the byte offset of a type's member */
+#define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername)
+
+/* Get the type pointer from one of its members */
+#define TYPE_FROM_MEMBER(type, memberptr, membername) \
+    ((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername)))
+
+/* returns index of first set bit + 1 or 0 if no bits are set */
+int find_first_set_bit(uint32_t val);
+
+static inline __attribute__((always_inline))
+uint32_t isolate_first_bit(uint32_t val)
+    { return val & -val; }
+
 /* gcc 3.4 changed the format of the constraints */
 #if (__GNUC__ >= 3) && (__GNUC_MINOR__ > 3) || (__GNUC__ >= 4)
 #define I_CONSTRAINT "I08"
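OFFSETOF()/TYPE_FROM_MEMBER() are a container_of-style pair: given a pointer to a member, recover the enclosing object, which is handy when a kernel object embeds another structure (such as a blocker) and the embedded pointer is all a callback receives. A standalone illustration with a made-up struct:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>  /* off_t */

    #define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername)
    #define TYPE_FROM_MEMBER(type, memberptr, membername) \
        ((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername)))

    struct item { int value; struct item *next; };

    int main(void)
    {
        struct item it = { 42, NULL };
        struct item **link = &it.next;  /* pointer to a member only... */

        struct item *back = TYPE_FROM_MEMBER(struct item, link, next);
        printf("%d\n", back->value);    /* ...recovers the whole item: 42 */
        return 0;
    }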
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index dd97ab1e83..bb1cb7cd17 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -26,21 +26,35 @@
 
 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than less priority threads
- * when they need it.
- *
+ * when they need it. Priority is differential such that the priority
+ * difference between a lower priority runnable thread and the highest priority
+ * runnable thread determines the amount of aging nescessary for the lower
+ * priority thread to be scheduled in order to prevent starvation.
+ *
  * If software playback codec pcm buffer is going down to critical, codec
- * can change it own priority to REALTIME to override user interface and
+ * can gradually raise its own priority to override user interface and
  * prevent playback skipping.
  */
+#define PRIORITY_RESERVED_HIGH   0   /* Reserved */
+#define PRIORITY_RESERVED_LOW    32  /* Reserved */
 #define HIGHEST_PRIORITY         1   /* The highest possible thread priority */
-#define LOWEST_PRIORITY          100 /* The lowest possible thread priority */
-#define PRIORITY_REALTIME        1
-#define PRIORITY_USER_INTERFACE  4   /* The main thread */
-#define PRIORITY_RECORDING       4   /* Recording thread */
-#define PRIORITY_PLAYBACK        4   /* or REALTIME when needed */
-#define PRIORITY_BUFFERING       4   /* Codec buffering thread */
-#define PRIORITY_SYSTEM          6   /* All other firmware threads */
-#define PRIORITY_BACKGROUND      8   /* Normal application threads */
+#define LOWEST_PRIORITY          31  /* The lowest possible thread priority */
+/* Realtime range reserved for threads that will not allow threads of lower
+ * priority to age and run (future expansion) */
+#define PRIORITY_REALTIME_1      1
+#define PRIORITY_REALTIME_2      2
+#define PRIORITY_REALTIME_3      3
+#define PRIORITY_REALTIME_4      4
+#define PRIORITY_REALTIME        4   /* Lowest realtime range */
+#define PRIORITY_USER_INTERFACE  16  /* The main thread */
+#define PRIORITY_RECORDING       16  /* Recording thread */
+#define PRIORITY_PLAYBACK        16  /* Variable between this and MAX */
+#define PRIORITY_PLAYBACK_MAX    5   /* Maximum allowable playback priority */
+#define PRIORITY_BUFFERING       16  /* Codec buffering thread */
+#define PRIORITY_SYSTEM          18  /* All other firmware threads */
+#define PRIORITY_BACKGROUND      20  /* Normal application threads */
+#define NUM_PRIORITIES           32
+#define PRIORITY_IDLE            32  /* Priority representative of no tasks */
 
 /* TODO: Only a minor tweak to create_thread would be needed to let
  * thread slots be caller allocated - no essential threading functionality
@@ -59,80 +73,40 @@
 
 #define DEFAULT_STACK_SIZE 0x400 /* Bytes */
 
-/**
- * "Busy" values that can be swapped into a variable to indicate
- * that the variable or object pointed to is in use by another processor
- * core. When accessed, the busy value is swapped-in while the current
- * value is atomically returned. If the swap returns the busy value,
- * the processor should retry the operation until some other value is
- * returned. When modification is finished, the new value should be
- * written which unlocks it and updates it atomically.
- *
- * Procedure:
- * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
- *
- * Modify/examine object at mem location or variable. Create "new_value"
- * as suitable.
- *
- * variable = new_value or curr_value;
- *
- * To check a value for busy and perform an operation if not:
- * curr_value = swap(&variable, BUSY_VALUE);
- *
- * if (curr_value != BUSY_VALUE)
- * {
- *     Modify/examine object at mem location or variable. Create "new_value"
- *     as suitable.
- *     variable = new_value or curr_value;
- * }
- * else
- * {
- *     Do nothing - already busy
- * }
- *
- * Only ever restore when an actual value is returned or else it could leave
- * the variable locked permanently if another processor unlocked in the
- * meantime. The next access attempt would deadlock for all processors since
- * an abandoned busy status would be left behind.
- */
-#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
-#define STATE_BUSYu8   UINT8_MAX
-#define STATE_BUSYi    INT_MIN
-
 #ifndef SIMULATOR
 /* Need to keep structures inside the header file because debug_menu
  * needs them. */
 #ifdef CPU_COLDFIRE
 struct regs
 {
-    unsigned int macsr; /* 0 - EMAC status register */
-    unsigned int d[6];  /* 4-24 - d2-d7 */
-    unsigned int a[5];  /* 28-44 - a2-a6 */
-    void *sp;           /* 48 - Stack pointer (a7) */
-    void *start;        /* 52 - Thread start address, or NULL when started */
+    uint32_t macsr; /* 0 - EMAC status register */
+    uint32_t d[6];  /* 4-24 - d2-d7 */
+    uint32_t a[5];  /* 28-44 - a2-a6 */
+    uint32_t sp;    /* 48 - Stack pointer (a7) */
+    uint32_t start; /* 52 - Thread start address, or NULL when started */
 };
 #elif CONFIG_CPU == SH7034
 struct regs
 {
-    unsigned int r[7];  /* 0-24 - Registers r8 thru r14 */
-    void *sp;           /* 28 - Stack pointer (r15) */
-    void *pr;           /* 32 - Procedure register */
-    void *start;        /* 36 - Thread start address, or NULL when started */
+    uint32_t r[7];  /* 0-24 - Registers r8 thru r14 */
+    uint32_t sp;    /* 28 - Stack pointer (r15) */
+    uint32_t pr;    /* 32 - Procedure register */
+    uint32_t start; /* 36 - Thread start address, or NULL when started */
 };
 #elif defined(CPU_ARM)
 struct regs
 {
-    unsigned int r[8];  /* 0-28 - Registers r4-r11 */
-    void *sp;           /* 32 - Stack pointer (r13) */
-    unsigned int lr;    /* 36 - r14 (lr) */
-    void *start;        /* 40 - Thread start address, or NULL when started */
+    uint32_t r[8];  /* 0-28 - Registers r4-r11 */
+    uint32_t sp;    /* 32 - Stack pointer (r13) */
+    uint32_t lr;    /* 36 - r14 (lr) */
+    uint32_t start; /* 40 - Thread start address, or NULL when started */
 };
 #endif /* CONFIG_CPU */
 #else
 struct regs
 {
     void *t;             /* Simulator OS thread */
-    void *c;             /* Condition for blocking and sync */
+    void *s;             /* Semaphore for blocking and wakeup */
     void (*start)(void); /* Start function */
 };
 #endif /* !SIMULATOR */
@@ -154,13 +128,13 @@ enum
                    thread_thaw is called with its ID */
     THREAD_NUM_STATES,
     TIMEOUT_STATE_FIRST = STATE_SLEEPING,
-#if NUM_CORES > 1
-    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
-#endif
 };
 
 #if NUM_CORES > 1
-#define THREAD_DESTRUCT ((const char *)0x84905617)
+/* Pointer value for name field to indicate thread is being killed. Using
+ * an alternate STATE_* won't work since that would interfere with operation
+ * while the thread is still running. */
+#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
 #endif
 
 /* Link information for lists thread is in */
@@ -188,7 +162,7 @@ void corelock_unlock(struct corelock *cl);
 /* Use native atomic swap/exchange instruction */
 struct corelock
 {
-    unsigned char locked;
+    volatile unsigned char locked;
 } __attribute__((packed));
 
 #define corelock_init(cl) \
@@ -207,15 +181,36 @@ struct corelock
 #define corelock_unlock(cl)
 #endif /* core locking selection */
 
-struct thread_queue
+#ifdef HAVE_PRIORITY_SCHEDULING
+struct blocker
 {
-    struct thread_entry *queue; /* list of threads waiting -
-                                   _must_ be first member */
-#if CONFIG_CORELOCK == SW_CORELOCK
-    struct corelock cl;         /* lock for atomic list operations */
-#endif
+    struct thread_entry *thread;   /* thread blocking other threads
+                                      (aka. object owner) */
+    int priority;                  /* highest priority waiter */
+    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+};
+
+/* Choices of wakeup protocol */
+
+/* For transfer of object ownership by one thread to another thread by
+ * the owning thread itself (mutexes) */
+struct thread_entry *
+    wakeup_priority_protocol_transfer(struct thread_entry *thread);
+
+/* For release by owner where ownership doesn't change - other threads,
+ * interrupts, timeouts, etc. (mutex timeout, queues) */
+struct thread_entry *
+    wakeup_priority_protocol_release(struct thread_entry *thread);
+
+
+struct priority_distribution
+{
+    uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
 };
 
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
 /* Information kept in each thread slot
  * members are arranged according to size - largest first - in order
  * to ensure both alignment and packing at the same time.
@@ -224,88 +219,83 @@ struct thread_entry
224{ 219{
225 struct regs context; /* Register context at switch - 220 struct regs context; /* Register context at switch -
226 _must_ be first member */ 221 _must_ be first member */
227 void *stack; /* Pointer to top of stack */ 222 uintptr_t *stack; /* Pointer to top of stack */
228 const char *name; /* Thread name */ 223 const char *name; /* Thread name */
229 long tmo_tick; /* Tick when thread should be woken from 224 long tmo_tick; /* Tick when thread should be woken from
230 timeout */ 225 timeout -
226 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
231 struct thread_list l; /* Links for blocked/waking/running - 227 struct thread_list l; /* Links for blocked/waking/running -
232 circular linkage in both directions */ 228 circular linkage in both directions */
233 struct thread_list tmo; /* Links for timeout list - 229 struct thread_list tmo; /* Links for timeout list -
234 Self-pointer-terminated in reverse direction, 230 Circular in reverse direction, NULL-terminated in
235 NULL-terminated in forward direction */ 231 forward direction -
236 struct thread_queue *bqp; /* Pointer to list variable in kernel 232 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
233 struct thread_entry **bqp; /* Pointer to list variable in kernel
237 object where thread is blocked - used 234 object where thread is blocked - used
238 for implicit unblock and explicit wake */ 235 for implicit unblock and explicit wake
239#if CONFIG_CORELOCK == SW_CORELOCK 236 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
240 struct thread_entry **bqnlp; /* Pointer to list variable in kernel 237#if NUM_CORES > 1
241 object where thread is blocked - non-locked 238 struct corelock *obj_cl; /* Object corelock where thead is blocked -
242 operations will be used */ 239 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
243#endif 240#endif
244 struct thread_entry *queue; /* List of threads waiting for thread to be 241 struct thread_entry *queue; /* List of threads waiting for thread to be
245 removed */ 242 removed */
246#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 243#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
247 intptr_t retval; /* Return value from a blocked operation */ 244 #define HAVE_WAKEUP_EXT_CB
245 void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
246 performs special steps needed when being
247 forced off of an object's wait queue that
248 go beyond the standard wait queue removal
249 and priority disinheritance */
250 /* Only enabled when using queue_send for now */
251#endif
252#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
253 intptr_t retval; /* Return value from a blocked operation/
254 misc. use */
248#endif 255#endif
249#ifdef HAVE_PRIORITY_SCHEDULING 256#ifdef HAVE_PRIORITY_SCHEDULING
250 long last_run; /* Last tick when started */ 257 /* Priority summary of owned objects that support inheritance */
258 struct blocker *blocker; /* Pointer to blocker when this thread is blocked
259 on an object that supports PIP -
260 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
261 struct priority_distribution pdist; /* Priority summary of owned objects
262 that have blocked threads and thread's own
263 base priority */
264 int skip_count; /* Number of times skipped if higher priority
265 thread was running */
251#endif 266#endif
252 unsigned short stack_size; /* Size of stack in bytes */ 267 unsigned short stack_size; /* Size of stack in bytes */
253#ifdef HAVE_PRIORITY_SCHEDULING 268#ifdef HAVE_PRIORITY_SCHEDULING
254 unsigned char priority; /* Current priority */ 269 unsigned char base_priority; /* Base priority (set explicitly during
255 unsigned char priority_x; /* Inherited priority - right now just a 270 creation or thread_set_priority) */
256 runtime guarantee flag */ 271 unsigned char priority; /* Scheduled priority (higher of base or
272 all threads blocked by this one) */
257#endif 273#endif
258 unsigned char state; /* Thread slot state (STATE_*) */ 274 unsigned char state; /* Thread slot state (STATE_*) */
259#if NUM_CORES > 1
260 unsigned char core; /* The core to which thread belongs */
261#endif
262#ifdef HAVE_SCHEDULER_BOOSTCTRL 275#ifdef HAVE_SCHEDULER_BOOSTCTRL
263 unsigned char boosted; /* CPU frequency boost flag */ 276 unsigned char cpu_boost; /* CPU frequency boost flag */
264#endif 277#endif
265#if CONFIG_CORELOCK == SW_CORELOCK 278#if NUM_CORES > 1
266 struct corelock cl; /* Corelock to lock thread slot */ 279 unsigned char core; /* The core to which thread belongs */
280 struct corelock waiter_cl; /* Corelock for thread_wait */
281 struct corelock slot_cl; /* Corelock to lock thread slot */
267#endif 282#endif
268}; 283};
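The hunk above only declares the pdist member; the layout of struct priority_distribution is not part of this header diff. Below is a minimal sketch of the idea it describes, a per-priority count of contributors plus a bitmask for a fast highest-priority lookup. The names hist, mask and the helpers are illustrative assumptions, not the tree's actual definitions, and the sketch assumes numerically lower values are more urgent.

    #include <stdint.h>

    #define NUM_PRIORITIES 32               /* assumed range for the sketch */

    struct priority_distribution
    {
        unsigned int hist[NUM_PRIORITIES];  /* contributors per priority level */
        uint32_t     mask;                  /* bit n set <=> hist[n] != 0 */
    };

    /* Account for one more blocked thread (or owned object) at 'priority' */
    static inline void prio_add_entry(struct priority_distribution *pd, int priority)
    {
        if (pd->hist[priority]++ == 0)
            pd->mask |= 1u << priority;
    }

    /* Drop one contributor at 'priority' */
    static inline void prio_remove_entry(struct priority_distribution *pd, int priority)
    {
        if (--pd->hist[priority] == 0)
            pd->mask &= ~(1u << priority);
    }

    /* Effective priority: lowest set bit (most urgent contributor) */
    static inline int prio_find_highest(const struct priority_distribution *pd)
    {
        return __builtin_ctz(pd->mask);     /* caller guarantees mask != 0 */
    }

With a shape like this, a thread's scheduled priority is simply the highest entry in its own distribution, which is what the base_priority/priority split above relies on.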
269 284
270#if NUM_CORES > 1 285#if NUM_CORES > 1
271/* Operations to be performed just before stopping a thread and starting 286/* Operations to be performed just before stopping a thread and starting
272 a new one if specified before calling switch_thread */ 287 a new one if specified before calling switch_thread */
273#define TBOP_UNLOCK_LIST 0x01 /* Set a pointer variable address var_ptrp */ 288enum
274#if CONFIG_CORELOCK == CORELOCK_SWAP 289{
275#define TBOP_SET_VARi 0x02 /* Set an int at address var_ip */ 290 TBOP_CLEAR = 0, /* No operation to do */
276#define TBOP_SET_VARu8 0x03 /* Set an unsigned char at address var_u8p */ 291 TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
277#define TBOP_VAR_TYPE_MASK 0x03 /* Mask for variable type*/ 292 TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
278#endif /* CONFIG_CORELOCK */ 293};
279#define TBOP_UNLOCK_CORELOCK 0x04
280#define TBOP_UNLOCK_THREAD 0x08 /* Unlock a thread's slot */
281#define TBOP_UNLOCK_CURRENT 0x10 /* Unlock the current thread's slot */
282#define TBOP_SWITCH_CORE 0x20 /* Call the core switch preparation routine */
283 294
284struct thread_blk_ops 295struct thread_blk_ops
285{ 296{
286#if CONFIG_CORELOCK != SW_CORELOCK 297 struct corelock *cl_p; /* pointer to corelock */
287 union 298 unsigned char flags; /* TBOP_* flags */
288 {
289 int var_iv; /* int variable value to set */
290 uint8_t var_u8v; /* unsigned char valur to set */
291 struct thread_entry *list_v; /* list pointer queue value to set */
292 };
293#endif
294 union
295 {
296#if CONFIG_CORELOCK != SW_CORELOCK
297 int *var_ip; /* pointer to int variable */
298 uint8_t *var_u8p; /* pointer to unsigned char varuable */
299#endif
300 struct thread_queue *list_p; /* pointer to list variable */
301 };
302#if CONFIG_CORELOCK == SW_CORELOCK
303 struct corelock *cl_p; /* corelock to unlock */
304 struct thread_entry *thread; /* thread to unlock */
305#elif CONFIG_CORELOCK == CORELOCK_SWAP
306 unsigned char state; /* new thread state (performs unlock) */
307#endif /* SOFTWARE_CORELOCK */
308 unsigned char flags; /* TBOP_* flags */
309}; 299};
310#endif /* NUM_CORES > 1 */ 300#endif /* NUM_CORES > 1 */
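The new thread_blk_ops reduces the old flag/union maze to a single corelock pointer and a TBOP_* code. How the scheduler drains it is defined in thread.c, which is not part of this hunk; the following is only a sketch of the intended shape, with run_blocking_ops as an assumed name.

    /* Illustrative consumer of the deferred operations, run once the
     * outgoing thread is safely off the run list */
    static void run_blocking_ops(unsigned int core)
    {
        struct thread_blk_ops *ops = &cores[core].blk_ops;

        switch (ops->flags)
        {
        case TBOP_UNLOCK_CORELOCK:
            /* release the object lock the blocked thread was still holding */
            corelock_unlock(ops->cl_p);
            break;
        case TBOP_SWITCH_CORE:
            /* run the target-specific core switch preparation (omitted) */
            break;
        case TBOP_CLEAR:
        default:
            break;
        }

        ops->flags = TBOP_CLEAR;
    }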
311 301
@@ -316,28 +306,30 @@ struct core_entry
316{ 306{
317 /* "Active" lists - core is constantly active on these and are never 307 /* "Active" lists - core is constantly active on these and are never
318 locked and interrupts do not access them */ 308 locked and interrupts do not access them */
319 struct thread_entry *running; /* threads that are running */ 309 struct thread_entry *running; /* threads that are running (RTR) */
320 struct thread_entry *timeout; /* threads that are on a timeout before 310 struct thread_entry *timeout; /* threads that are on a timeout before
321 running again */ 311 running again */
322 /* "Shared" lists - cores interact in a synchronized manner - access 312 struct thread_entry *block_task; /* Task going off running list */
323 is locked between cores and interrupts */ 313#ifdef HAVE_PRIORITY_SCHEDULING
324 struct thread_queue waking; /* intermediate locked list that 314 struct priority_distribution rtr; /* Summary of running and ready-to-run
325 hold threads other core should wake up 315 threads */
326 on next task switch */ 316#endif
327 long next_tmo_check; /* soonest time to check tmo threads */ 317 long next_tmo_check; /* soonest time to check tmo threads */
328#if NUM_CORES > 1 318#if NUM_CORES > 1
329 struct thread_blk_ops blk_ops; /* operations to perform when 319 struct thread_blk_ops blk_ops; /* operations to perform when
330 blocking a thread */ 320 blocking a thread */
331#endif /* NUM_CORES */
332#ifdef HAVE_PRIORITY_SCHEDULING 321#ifdef HAVE_PRIORITY_SCHEDULING
333 unsigned char highest_priority; 322 struct corelock rtr_cl; /* Lock for rtr list */
334#endif 323#endif
324#endif /* NUM_CORES */
335}; 325};
336 326
337#ifdef HAVE_PRIORITY_SCHEDULING 327#ifdef HAVE_PRIORITY_SCHEDULING
338#define IF_PRIO(...) __VA_ARGS__ 328#define IF_PRIO(...) __VA_ARGS__
329#define IFN_PRIO(...)
339#else 330#else
340#define IF_PRIO(...) 331#define IF_PRIO(...)
332#define IFN_PRIO(...) __VA_ARGS__
341#endif 333#endif
342 334
343/* Macros generate better code than an inline function in this case */ 335
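IF_PRIO()/IFN_PRIO() let a single statement compile away cleanly depending on HAVE_PRIORITY_SCHEDULING. Both of these uses are lifted from the kernel.c hunks later in this patch:

    /* Emitted only when HAVE_PRIORITY_SCHEDULING is defined */
    IF_PRIO( q->blocker_p = NULL; )

    /* Emitted only when it is not defined */
    IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )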
@@ -464,13 +456,18 @@ struct core_entry
464void core_idle(void); 456void core_idle(void);
465void core_wake(IF_COP_VOID(unsigned int core)); 457void core_wake(IF_COP_VOID(unsigned int core));
466 458
459/* Initialize the scheduler */
460void init_threads(void);
461
462/* Allocate a thread in the scheduler */
467#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */ 463#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
468struct thread_entry* 464struct thread_entry*
469 create_thread(void (*function)(void), void* stack, int stack_size, 465 create_thread(void (*function)(void), void* stack, size_t stack_size,
470 unsigned flags, const char *name 466 unsigned flags, const char *name
471 IF_PRIO(, int priority) 467 IF_PRIO(, int priority)
472 IF_COP(, unsigned int core)); 468 IF_COP(, unsigned int core));
473 469
470/* Set and clear the CPU frequency boost flag for the calling thread */
474#ifdef HAVE_SCHEDULER_BOOSTCTRL 471#ifdef HAVE_SCHEDULER_BOOSTCTRL
475void trigger_cpu_boost(void); 472void trigger_cpu_boost(void);
476void cancel_cpu_boost(void); 473void cancel_cpu_boost(void);
@@ -478,86 +475,52 @@ void cancel_cpu_boost(void);
478#define trigger_cpu_boost() 475#define trigger_cpu_boost()
479#define cancel_cpu_boost() 476#define cancel_cpu_boost()
480#endif 477#endif
478/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
479 * Has no effect on a thread not frozen. */
481void thread_thaw(struct thread_entry *thread); 480void thread_thaw(struct thread_entry *thread);
481/* Wait for a thread to exit */
482void thread_wait(struct thread_entry *thread); 482void thread_wait(struct thread_entry *thread);
483/* Exit the current thread */
484void thread_exit(void);
485#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
486#define ALLOW_REMOVE_THREAD
487/* Remove a thread from the scheduler */
483void remove_thread(struct thread_entry *thread); 488void remove_thread(struct thread_entry *thread);
484void switch_thread(struct thread_entry *old); 489#endif
485void sleep_thread(int ticks);
486 490
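Putting the creation API above together: a thread is typically created frozen, finished being set up, then released with thread_thaw(). A sketch under the usual tree conventions; my_thread, my_stack, DEFAULT_STACK_SIZE, PRIORITY_BACKGROUND and CPU are assumptions from outside this diff.

    static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void my_thread(void)
    {
        for (;;)
            sleep(HZ);                      /* periodic work, once a second */
    }

    void start_my_thread(void)
    {
        struct thread_entry *t =
            create_thread(my_thread, my_stack, sizeof(my_stack),
                          CREATE_THREAD_FROZEN, "mythread"
                          IF_PRIO(, PRIORITY_BACKGROUND)
                          IF_COP(, CPU));

        /* finish any state the thread depends on before letting it run */
        thread_thaw(t);                     /* no-op if t was not frozen */
    }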
487/** 491/* Switch to next runnable thread */
488 * Setup to allow using thread queues as locked or non-locked without speed 492void switch_thread(void);
489 * sacrifices in both core locking types. 493/* Blocks a thread for at least the specified number of ticks (0 = wait until
490 * 494 * next tick) */
491 * The blocking/waking function inline two different version of the real 495void sleep_thread(int ticks);
492 * function into the stubs when a software or other separate core locking 496/* Indefinitely blocks the current thread on a thread queue */
493 * mechanism is employed. 497void block_thread(struct thread_entry *current);
494 * 498/* Blocks the current thread on a thread queue until explicitely woken or
495 * When a simple test-and-set or similar instruction is available, locking 499 * the timeout is reached */
496 * has no cost and so one version is used and the internal worker is called 500void block_thread_w_tmo(struct thread_entry *current, int timeout);
497 * directly. 501
498 * 502/* Return bit flags for thread wakeup */
499 * CORELOCK_NONE is treated the same as when an atomic instruction can be 503#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
500 * used. 504#define THREAD_OK 0x1 /* A thread was woken up */
501 */ 505#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
506 higher priority than current were woken) */
502 507
503/* Blocks the current thread on a thread queue */
504#if CONFIG_CORELOCK == SW_CORELOCK
505void block_thread(struct thread_queue *tq);
506void block_thread_no_listlock(struct thread_entry **list);
507#else
508void _block_thread(struct thread_queue *tq);
509static inline void block_thread(struct thread_queue *tq)
510 { _block_thread(tq); }
511static inline void block_thread_no_listlock(struct thread_entry **list)
512 { _block_thread((struct thread_queue *)list); }
513#endif /* CONFIG_CORELOCK */
514
515/* Blocks the current thread on a thread queue for a max amount of time
516 * There is no "_no_listlock" version because timeout blocks without sync on
517 * the blocking queues is not permitted since either core could access the
518 * list at any time to do an implicit wake. In other words, objects with
519 * timeout support require lockable queues. */
520void block_thread_w_tmo(struct thread_queue *tq, int timeout);
521
522/* Wakes up the thread at the head of the queue */
523#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
524#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
525#if CONFIG_CORELOCK == SW_CORELOCK
526struct thread_entry * wakeup_thread(struct thread_queue *tq);
527struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
528#else
529struct thread_entry * _wakeup_thread(struct thread_queue *list);
530static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
531 { return _wakeup_thread(tq); }
532static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
533 { return _wakeup_thread((struct thread_queue *)list); }
534#endif /* CONFIG_CORELOCK */
535
536/* Initialize a thread_queue object. */
537static inline void thread_queue_init(struct thread_queue *tq)
538 { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
539/* A convenience function for waking an entire queue of threads. */ 508/* A convenience function for waking an entire queue of threads. */
540static inline void thread_queue_wake(struct thread_queue *tq) 509unsigned int thread_queue_wake(struct thread_entry **list);
541 { while (wakeup_thread(tq) != NULL); } 510
542/* The no-listlock version of thread_queue_wake() */ 511/* Wakeup a thread at the head of a list */
543static inline void thread_queue_wake_no_listlock(struct thread_entry **list) 512unsigned int wakeup_thread(struct thread_entry **list);
544 { while (wakeup_thread_no_listlock(list) != NULL); }
545 513
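wakeup_thread() and thread_queue_wake() now report what happened through the THREAD_* flags above instead of returning a thread pointer. A sketch of checking the result, modeled on the pattern mutex_unlock() uses later in this patch; 'list' stands for some kernel object's wait list.

    struct thread_entry *list = /* some object's wait list */ NULL;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    unsigned int result = wakeup_thread(&list);
    set_irq_level(oldlevel);

    if (result == THREAD_NONE)
    {
        /* nobody was waiting */
    }
    else if (result & THREAD_SWITCH)
    {
        switch_thread();    /* a woken thread outranks the current one */
    }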
546#ifdef HAVE_PRIORITY_SCHEDULING 514#ifdef HAVE_PRIORITY_SCHEDULING
547int thread_set_priority(struct thread_entry *thread, int priority); 515int thread_set_priority(struct thread_entry *thread, int priority);
548int thread_get_priority(struct thread_entry *thread); 516int thread_get_priority(struct thread_entry *thread);
549/* Yield that guarantees thread execution once per round regardless of
550 thread's scheduler priority - basically a transient realtime boost
551 without altering the scheduler's thread precedence. */
552void priority_yield(void);
553#else
554#define priority_yield yield
555#endif /* HAVE_PRIORITY_SCHEDULING */ 517#endif /* HAVE_PRIORITY_SCHEDULING */
556#if NUM_CORES > 1 518#if NUM_CORES > 1
557unsigned int switch_core(unsigned int new_core); 519unsigned int switch_core(unsigned int new_core);
558#endif 520#endif
559struct thread_entry * thread_get_current(void); 521struct thread_entry * thread_get_current(void);
560void init_threads(void); 522
523/* Debugging info - only! */
561int thread_stack_usage(const struct thread_entry *thread); 524int thread_stack_usage(const struct thread_entry *thread);
562#if NUM_CORES > 1 525#if NUM_CORES > 1
563int idle_stack_usage(unsigned int core); 526int idle_stack_usage(unsigned int core);
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 835181f1ae..47c0d58a95 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -20,21 +20,30 @@
20#include <string.h> 20#include <string.h>
21#include "config.h" 21#include "config.h"
22#include "kernel.h" 22#include "kernel.h"
23#ifdef SIMULATOR
24#include "system-sdl.h"
25#include "debug.h"
26#endif
23#include "thread.h" 27#include "thread.h"
24#include "cpu.h" 28#include "cpu.h"
25#include "system.h" 29#include "system.h"
26#include "panic.h" 30#include "panic.h"
27 31
28/* Make this nonzero to enable more elaborate checks on objects */ 32/* Make this nonzero to enable more elaborate checks on objects */
29#ifdef DEBUG 33#if defined(DEBUG) || defined(SIMULATOR)
30#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */ 34#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
31#else 35#else
32#define KERNEL_OBJECT_CHECKS 0 36#define KERNEL_OBJECT_CHECKS 0
33#endif 37#endif
34 38
35#if KERNEL_OBJECT_CHECKS 39#if KERNEL_OBJECT_CHECKS
40#ifdef SIMULATOR
41#define KERNEL_ASSERT(exp, msg...) \
42 ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
43#else
36#define KERNEL_ASSERT(exp, msg...) \ 44#define KERNEL_ASSERT(exp, msg...) \
37 ({ if (!({ exp; })) panicf(msg); }) 45 ({ if (!({ exp; })) panicf(msg); })
46#endif
38#else 47#else
39#define KERNEL_ASSERT(exp, msg...) ({}) 48#define KERNEL_ASSERT(exp, msg...) ({})
40#endif 49#endif
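KERNEL_ASSERT() takes a printf-style message and either panics or, under the simulator, prints through DEBUGF and exits. The usage below is taken verbatim from the mutex_unlock() hunk further down:

    KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  MUTEX_GET_THREAD(m)->name,
                  thread_get_current()->name);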
@@ -52,9 +61,7 @@ static struct
52{ 61{
53 int count; 62 int count;
54 struct event_queue *queues[MAX_NUM_QUEUES]; 63 struct event_queue *queues[MAX_NUM_QUEUES];
55#if NUM_CORES > 1 64 IF_COP( struct corelock cl; )
56 struct corelock cl;
57#endif
58} all_queues NOCACHEBSS_ATTR; 65} all_queues NOCACHEBSS_ATTR;
59 66
60/**************************************************************************** 67/****************************************************************************
@@ -77,6 +84,334 @@ void kernel_init(void)
77 } 84 }
78} 85}
79 86
87/****************************************************************************
88 * Timer tick
89 ****************************************************************************/
90#if CONFIG_CPU == SH7034
91void tick_start(unsigned int interval_in_ms)
92{
93 unsigned long count;
94
95 count = CPU_FREQ * interval_in_ms / 1000 / 8;
96
97 if(count > 0x10000)
98 {
99 panicf("Error! The tick interval is too long (%d ms)\n",
100 interval_in_ms);
101 return;
102 }
103
104 /* We are using timer 0 */
105
106 TSTR &= ~0x01; /* Stop the timer */
107 TSNC &= ~0x01; /* No synchronization */
108 TMDR &= ~0x01; /* Operate normally */
109
110 TCNT0 = 0; /* Start counting at 0 */
111 GRA0 = (unsigned short)(count - 1);
112 TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
113
114 /* Enable interrupt on level 1 */
115 IPRC = (IPRC & ~0x00f0) | 0x0010;
116
117 TSR0 &= ~0x01;
118 TIER0 = 0xf9; /* Enable GRA match interrupt */
119
120 TSTR |= 0x01; /* Start timer 1 */
121}
122
123void IMIA0(void) __attribute__ ((interrupt_handler));
124void IMIA0(void)
125{
126 int i;
127
128 /* Run through the list of tick tasks */
129 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
130 {
131 if(tick_funcs[i])
132 {
133 tick_funcs[i]();
134 }
135 }
136
137 current_tick++;
138
139 TSR0 &= ~0x01;
140}
141#elif defined(CPU_COLDFIRE)
142void tick_start(unsigned int interval_in_ms)
143{
144 unsigned long count;
145 int prescale;
146
147 count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
148
149 if(count > 0x10000)
150 {
151 panicf("Error! The tick interval is too long (%d ms)\n",
152 interval_in_ms);
153 return;
154 }
155
156 prescale = cpu_frequency / CPU_FREQ;
157 /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
158 changes within timer.c */
159
160 /* We are using timer 0 */
161
162 TRR0 = (unsigned short)(count - 1); /* The reference count */
163 TCN0 = 0; /* reset the timer */
164 TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
165 /* restart, CLK/16, enabled, prescaler */
166
167 TER0 = 0xff; /* Clear all events */
168
169 ICR1 = 0x8c; /* Interrupt on level 3.0 */
170 IMR &= ~0x200;
171}
172
173void TIMER0(void) __attribute__ ((interrupt_handler));
174void TIMER0(void)
175{
176 int i;
177
178 /* Run through the list of tick tasks */
179 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
180 {
181 if(tick_funcs[i])
182 {
183 tick_funcs[i]();
184 }
185 }
186
187 current_tick++;
188
189 TER0 = 0xff; /* Clear all events */
190}
191
192#elif defined(CPU_PP)
193
194#ifndef BOOTLOADER
195void TIMER1(void)
196{
197 int i;
198
199 /* Run through the list of tick tasks (using main core) */
200 TIMER1_VAL; /* Read value to ack IRQ */
201
202 /* Run through the list of tick tasks using main CPU core -
203 wake up the COP through its control interface to provide pulse */
204 for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
205 {
206 if (tick_funcs[i])
207 {
208 tick_funcs[i]();
209 }
210 }
211
212#if NUM_CORES > 1
213 /* Pulse the COP */
214 core_wake(COP);
215#endif /* NUM_CORES */
216
217 current_tick++;
218}
219#endif
220
221/* Must be last function called in kernel/thread initialization */
222void tick_start(unsigned int interval_in_ms)
223{
224#ifndef BOOTLOADER
225 TIMER1_CFG = 0x0;
226 TIMER1_VAL;
227 /* enable timer */
228 TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
229 /* unmask interrupt source */
230 CPU_INT_EN = TIMER1_MASK;
231#else
232 /* We don't enable interrupts in the bootloader */
233 (void)interval_in_ms;
234#endif
235}
236
237#elif CONFIG_CPU == PNX0101
238
239void timer_handler(void)
240{
241 int i;
242
243 /* Run through the list of tick tasks */
244 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
245 {
246 if(tick_funcs[i])
247 tick_funcs[i]();
248 }
249
250 current_tick++;
251
252 TIMER0.clr = 0;
253}
254
255void tick_start(unsigned int interval_in_ms)
256{
257 TIMER0.ctrl &= ~0x80; /* Disable the counter */
258 TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
259 TIMER0.load = 3000000 * interval_in_ms / 1000;
260 TIMER0.ctrl &= ~0xc; /* No prescaler */
261 TIMER0.clr = 1; /* Clear the interrupt request */
262
263 irq_set_int_handler(IRQ_TIMER0, timer_handler);
264 irq_enable_int(IRQ_TIMER0);
265
266 TIMER0.ctrl |= 0x80; /* Enable the counter */
267}
268#endif
269
270int tick_add_task(void (*f)(void))
271{
272 int i;
273 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
274
275 /* Add a task if there is room */
276 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
277 {
278 if(tick_funcs[i] == NULL)
279 {
280 tick_funcs[i] = f;
281 set_irq_level(oldlevel);
282 return 0;
283 }
284 }
285 set_irq_level(oldlevel);
286 panicf("Error! tick_add_task(): out of tasks");
287 return -1;
288}
289
290int tick_remove_task(void (*f)(void))
291{
292 int i;
293 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
294
295 /* Remove a task if it is there */
296 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
297 {
298 if(tick_funcs[i] == f)
299 {
300 tick_funcs[i] = NULL;
301 set_irq_level(oldlevel);
302 return 0;
303 }
304 }
305
306 set_irq_level(oldlevel);
307 return -1;
308}
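Tick tasks are plain void (*)(void) callbacks run from the timer interrupt on every tick, so they must be short and never block. A minimal registration sketch; the counter and function names are illustrative.

    static volatile long my_tick_count = 0;

    static void my_tick_task(void)
    {
        /* interrupt context, once per tick - keep it trivial */
        my_tick_count++;
    }

    void my_module_init(void)
    {
        tick_add_task(my_tick_task);    /* panics if the task table is full */
    }

    void my_module_exit(void)
    {
        tick_remove_task(my_tick_task); /* returns -1 if it was never added */
    }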
309
310/****************************************************************************
311 * Tick-based interval timers/one-shots - be mindful this is not really
312 * intended for continuous timers but for events that need to run for a short
313 * time and be cancelled without further software intervention.
314 ****************************************************************************/
315#ifdef INCLUDE_TIMEOUT_API
316static struct timeout *tmo_list = NULL; /* list of active timeout events */
317
318/* timeout tick task - calls event handlers when they expire
319 * Event handlers may alter ticks, callback and data during operation.
320 */
321static void timeout_tick(void)
322{
323 unsigned long tick = current_tick;
324 struct timeout *curr, *next;
325
326 for (curr = tmo_list; curr != NULL; curr = next)
327 {
328 next = (struct timeout *)curr->next;
329
330 if (TIME_BEFORE(tick, curr->expires))
331 continue;
332
333 /* this event has expired - call callback */
334 if (curr->callback(curr))
335 *(long *)&curr->expires = tick + curr->ticks; /* reload */
336 else
337 timeout_cancel(curr); /* cancel */
338 }
339}
340
341/* Cancels a timeout callback - can be called from the ISR */
342void timeout_cancel(struct timeout *tmo)
343{
344 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
345
346 if (tmo_list != NULL)
347 {
348 struct timeout *curr = tmo_list;
349 struct timeout *prev = NULL;
350
351 while (curr != tmo && curr != NULL)
352 {
353 prev = curr;
354 curr = (struct timeout *)curr->next;
355 }
356
357 if (curr != NULL)
358 {
359 /* in list */
360 if (prev == NULL)
361 tmo_list = (struct timeout *)curr->next;
362 else
363 *(const struct timeout **)&prev->next = curr->next;
364
365 if (tmo_list == NULL)
366 tick_remove_task(timeout_tick); /* last one - remove task */
367 }
368 /* not in list or tmo == NULL */
369 }
370
371 set_irq_level(oldlevel);
372}
373
374/* Adds a timeout callback - calling with an active timeout resets the
375 interval - can be called from the ISR */
376void timeout_register(struct timeout *tmo, timeout_cb_type callback,
377 int ticks, intptr_t data)
378{
379 int oldlevel;
380 struct timeout *curr;
381
382 if (tmo == NULL)
383 return;
384
385 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
386
387 /* see if this one is already registered */
388 curr = tmo_list;
389 while (curr != tmo && curr != NULL)
390 curr = (struct timeout *)curr->next;
391
392 if (curr == NULL)
393 {
394 /* not found - add it */
395 if (tmo_list == NULL)
396 tick_add_task(timeout_tick); /* first one - add task */
397
398 *(struct timeout **)&tmo->next = tmo_list;
399 tmo_list = tmo;
400 }
401
402 tmo->callback = callback;
403 tmo->ticks = ticks;
404 tmo->data = data;
405 *(long *)&tmo->expires = current_tick + ticks;
406
407 set_irq_level(oldlevel);
408}
409
410#endif /* INCLUDE_TIMEOUT_API */
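As timeout_tick() above shows, a callback that returns nonzero is re-armed for another tmo->ticks ticks and one that returns zero is cancelled; callbacks run in interrupt context and may adjust ticks and data as they go. A short sketch with illustrative names:

    static struct timeout my_tmo;

    /* Fires from the tick task once the timeout expires */
    static int my_timeout_cb(struct timeout *tmo)
    {
        if (--tmo->data > 0)
            return 1;   /* nonzero: reload for another tmo->ticks ticks */

        return 0;       /* zero: timeout_tick() cancels the timeout */
    }

    void arm_my_timeout(void)
    {
        /* fire in one second, repeat five times via the data field */
        timeout_register(&my_tmo, my_timeout_cb, HZ, 5);
    }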
411
412/****************************************************************************
413 * Thread stuff
414 ****************************************************************************/
80void sleep(int ticks) 415void sleep(int ticks)
81{ 416{
82#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER) 417#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
@@ -96,9 +431,11 @@ void sleep(int ticks)
96#elif defined(CPU_PP) && defined(BOOTLOADER) 431#elif defined(CPU_PP) && defined(BOOTLOADER)
97 unsigned stop = USEC_TIMER + ticks * (1000000/HZ); 432 unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
98 while (TIME_BEFORE(USEC_TIMER, stop)) 433 while (TIME_BEFORE(USEC_TIMER, stop))
99 switch_thread(NULL); 434 switch_thread();
100#else 435#else
436 set_irq_level(HIGHEST_IRQ_LEVEL);
101 sleep_thread(ticks); 437 sleep_thread(ticks);
438 switch_thread();
102#endif 439#endif
103} 440}
104 441
@@ -107,7 +444,7 @@ void yield(void)
107#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER)) 444#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
108 /* Some targets don't like yielding in the bootloader */ 445 /* Some targets don't like yielding in the bootloader */
109#else 446#else
110 switch_thread(NULL); 447 switch_thread();
111#endif 448#endif
112} 449}
113 450
@@ -116,43 +453,50 @@ void yield(void)
116 ****************************************************************************/ 453 ****************************************************************************/
117 454
118#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 455#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
119/* Moves waiting thread's descriptor to the current sender when a 456/****************************************************************************
120 message is dequeued */ 457 * Sender thread queue structure that aids implementation of priority
121static void queue_fetch_sender(struct queue_sender_list *send, 458 * inheritance on queues because the send list structure is the same as
122 unsigned int i) 459 * for all other kernel objects:
123{ 460 *
124 struct thread_entry **spp = &send->senders[i]; 461 * Example state:
125 462 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
126 if(*spp) 463 * E3 was posted with queue_post
127 { 464 * 4 events remain enqueued (E1-E4)
128 send->curr_sender = *spp; 465 *
129 *spp = NULL; 466 * rd wr
130 } 467 * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
131} 468 * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
469 * \/ \/ \/
470 * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
471 * q->send->curr_sender: /\
472 *
473 * Thread has E0 in its own struct queue_event.
474 *
475 ****************************************************************************/
132 476
133/* Puts the specified return value in the waiting thread's return value 477/* Puts the specified return value in the waiting thread's return value
134 * and wakes the thread. 478 * and wakes the thread.
135 * 1) A sender should be confirmed to exist before calling which makes it 479 *
136 * more efficient to reject the majority of cases that don't need this 480 * A sender should be confirmed to exist before calling which makes it
137 called. 481 * more efficient to reject the majority of cases that don't need this
138 * 2) Requires interrupts disabled since queue overflows can cause posts 482 * called.
139 * from interrupt handlers to wake threads. Not doing so could cause
140 * an attempt at multiple wakes or other problems.
141 */ 483 */
142static void queue_release_sender(struct thread_entry **sender, 484static void queue_release_sender(struct thread_entry **sender,
143 intptr_t retval) 485 intptr_t retval)
144{ 486{
145 (*sender)->retval = retval; 487 struct thread_entry *thread = *sender;
146 wakeup_thread_no_listlock(sender); 488
147 /* This should _never_ happen - there must never be multiple 489 *sender = NULL; /* Clear slot. */
148 threads in this list and it is a corrupt state */ 490 thread->wakeup_ext_cb = NULL; /* Clear callback. */
149 KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender); 491 thread->retval = retval; /* Assign thread-local return value. */
492 *thread->bqp = thread; /* Move blocking queue head to thread since
493 wakeup_thread wakes the first thread in
494 the list. */
495 wakeup_thread(thread->bqp);
150} 496}
151 497
152/* Releases any waiting threads that are queued with queue_send - 498/* Releases any waiting threads that are queued with queue_send -
153 * reply with 0. 499 * reply with 0.
154 * Disable IRQs and lock before calling since it uses
155 * queue_release_sender.
156 */ 500 */
157static void queue_release_all_senders(struct event_queue *q) 501static void queue_release_all_senders(struct event_queue *q)
158{ 502{
@@ -172,25 +516,103 @@ static void queue_release_all_senders(struct event_queue *q)
172 } 516 }
173} 517}
174 518
519/* Callback to do extra forced removal steps from sender list in addition
520 * to the normal blocking queue removal and priority disinheritance */
521static void queue_remove_sender_thread_cb(struct thread_entry *thread)
522{
523 *((struct thread_entry **)thread->retval) = NULL;
524 thread->wakeup_ext_cb = NULL;
525 thread->retval = 0;
526}
527
175/* Enables queue_send on the specified queue - caller allocates the extra 528/* Enables queue_send on the specified queue - caller allocates the extra
176 data structure. Only queues which are taken to be owned by a thread should 529 * data structure. Only queues which are taken to be owned by a thread should
177 enable this. Public waiting is not permitted. */ 530 * enable this. An official owner is not compulsory, but one must be
531 * specified for priority inheritance to operate.
532 *
533 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
534 * messages results in an undefined order of message replies.
535 */
178void queue_enable_queue_send(struct event_queue *q, 536void queue_enable_queue_send(struct event_queue *q,
179 struct queue_sender_list *send) 537 struct queue_sender_list *send,
538 struct thread_entry *owner)
180{ 539{
181 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 540 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
182 corelock_lock(&q->cl); 541 corelock_lock(&q->cl);
183 542
184 q->send = NULL; 543 if(send != NULL && q->send == NULL)
185 if(send != NULL)
186 { 544 {
187 memset(send, 0, sizeof(*send)); 545 memset(send, 0, sizeof(*send));
546#ifdef HAVE_PRIORITY_SCHEDULING
547 send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
548 send->blocker.priority = PRIORITY_IDLE;
549 send->blocker.thread = owner;
550 if(owner != NULL)
551 q->blocker_p = &send->blocker;
552#endif
188 q->send = send; 553 q->send = send;
189 } 554 }
190 555
191 corelock_unlock(&q->cl); 556 corelock_unlock(&q->cl);
192 set_irq_level(oldlevel); 557 set_irq_level(oldlevel);
558
559 (void)owner;
193} 560}
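With the new owner argument, a thread that services its own queue wires itself in as the blocker's beneficiary so senders can lend it their priority. A sketch of the typical receiver side; the queue, sender list and event IDs are illustrative.

    static struct event_queue my_queue;
    static struct queue_sender_list my_queue_send;

    static void my_queue_thread(void)
    {
        struct queue_event ev;

        queue_init(&my_queue, true);
        /* owner = this thread, so queue_send() callers inherit through it */
        queue_enable_queue_send(&my_queue, &my_queue_send, thread_get_current());

        while (1)
        {
            queue_wait(&my_queue, &ev);  /* auto-replies 0 to the previous sender */

            /* ... handle ev.id / ev.data ... */

            queue_reply(&my_queue, 1);   /* explicit, nonzero reply to a sender */
        }
    }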
561
562/* Unblock a blocked thread at a given event index */
563static inline void queue_do_unblock_sender(struct queue_sender_list *send,
564 unsigned int i)
565{
566 if(send)
567 {
568 struct thread_entry **spp = &send->senders[i];
569
570 if(*spp)
571 {
572 queue_release_sender(spp, 0);
573 }
574 }
575}
576
577/* Perform the auto-reply sequence */
578static inline void queue_do_auto_reply(struct queue_sender_list *send)
579{
580 if(send && send->curr_sender)
581 {
582 /* auto-reply */
583 queue_release_sender(&send->curr_sender, 0);
584 }
585}
586
587/* Moves waiting thread's reference from the senders array to the
588 * current_sender which represents the thread waiting for a response to the
589 * last message removed from the queue. This also protects the thread from
590 * being bumped due to overflow which would not be a valid action since its
591 * message _is_ being processed at this point. */
592static inline void queue_do_fetch_sender(struct queue_sender_list *send,
593 unsigned int rd)
594{
595 if(send)
596 {
597 struct thread_entry **spp = &send->senders[rd];
598
599 if(*spp)
600 {
601 /* Move thread reference from array to the next thread
602 that queue_reply will release */
603 send->curr_sender = *spp;
604 (*spp)->retval = (intptr_t)spp;
605 *spp = NULL;
606 }
607 /* else message was posted asynchronously with queue_post */
608 }
609}
610#else
611/* Empty macros for when synchronous sending is not enabled */
612#define queue_release_all_senders(q)
613#define queue_do_unblock_sender(send, i)
614#define queue_do_auto_reply(send)
615#define queue_do_fetch_sender(send, rd)
194#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ 616#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
195 617
196/* Queue must not be available for use during this call */ 618/* Queue must not be available for use during this call */
@@ -204,11 +626,12 @@ void queue_init(struct event_queue *q, bool register_queue)
204 } 626 }
205 627
206 corelock_init(&q->cl); 628 corelock_init(&q->cl);
207 thread_queue_init(&q->queue); 629 q->queue = NULL;
208 q->read = 0; 630 q->read = 0;
209 q->write = 0; 631 q->write = 0;
210#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 632#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
211 q->send = NULL; /* No message sending by default */ 633 q->send = NULL; /* No message sending by default */
634 IF_PRIO( q->blocker_p = NULL; )
212#endif 635#endif
213 636
214 if(register_queue) 637 if(register_queue)
@@ -254,14 +677,20 @@ void queue_delete(struct event_queue *q)
254 677
255 corelock_unlock(&all_queues.cl); 678 corelock_unlock(&all_queues.cl);
256 679
257 /* Release threads waiting on queue head */ 680 /* Release thread(s) waiting on queue head */
258 thread_queue_wake(&q->queue); 681 thread_queue_wake(&q->queue);
259 682
260#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 683#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
261 /* Release waiting threads for reply and reply to any dequeued 684 if(q->send)
262 message waiting for one. */ 685 {
263 queue_release_all_senders(q); 686 /* Release threads waiting for replies */
264 queue_reply(q, 0); 687 queue_release_all_senders(q);
688
689 /* Reply to any dequeued message waiting for one */
690 queue_do_auto_reply(q->send);
691
692 q->send = NULL;
693 }
265#endif 694#endif
266 695
267 q->read = 0; 696 q->read = 0;
@@ -279,33 +708,32 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
279 int oldlevel; 708 int oldlevel;
280 unsigned int rd; 709 unsigned int rd;
281 710
711#ifdef HAVE_PRIORITY_SCHEDULING
712 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
713 QUEUE_GET_THREAD(q) == thread_get_current(),
714 "queue_wait->wrong thread\n");
715#endif
716
282 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 717 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
283 corelock_lock(&q->cl); 718 corelock_lock(&q->cl);
284 719
285#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 720 /* auto-reply */
286 if(q->send && q->send->curr_sender) 721 queue_do_auto_reply(q->send);
287 {
288 /* auto-reply */
289 queue_release_sender(&q->send->curr_sender, 0);
290 }
291#endif
292 722
293 if (q->read == q->write) 723 if (q->read == q->write)
294 { 724 {
725 struct thread_entry *current = cores[CURRENT_CORE].running;
726
295 do 727 do
296 { 728 {
297#if CONFIG_CORELOCK == CORELOCK_NONE 729 IF_COP( current->obj_cl = &q->cl; )
298#elif CONFIG_CORELOCK == SW_CORELOCK 730 current->bqp = &q->queue;
299 const unsigned int core = CURRENT_CORE; 731
300 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 732 block_thread(current);
301 cores[core].blk_ops.cl_p = &q->cl; 733
302#elif CONFIG_CORELOCK == CORELOCK_SWAP 734 corelock_unlock(&q->cl);
303 const unsigned int core = CURRENT_CORE; 735 switch_thread();
304 cores[core].blk_ops.flags = TBOP_SET_VARu8; 736
305 cores[core].blk_ops.var_u8p = &q->cl.locked;
306 cores[core].blk_ops.var_u8v = 0;
307#endif /* CONFIG_CORELOCK */
308 block_thread(&q->queue);
309 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 737 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
310 corelock_lock(&q->cl); 738 corelock_lock(&q->cl);
311 } 739 }
@@ -316,13 +744,8 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
316 rd = q->read++ & QUEUE_LENGTH_MASK; 744 rd = q->read++ & QUEUE_LENGTH_MASK;
317 *ev = q->events[rd]; 745 *ev = q->events[rd];
318 746
319#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 747 /* Get data for a waiting thread if one */
320 if(q->send && q->send->senders[rd]) 748 queue_do_fetch_sender(q->send, rd);
321 {
322 /* Get data for a waiting thread if one */
323 queue_fetch_sender(q->send, rd);
324 }
325#endif
326 749
327 corelock_unlock(&q->cl); 750 corelock_unlock(&q->cl);
328 set_irq_level(oldlevel); 751 set_irq_level(oldlevel);
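The sequence above is the blocking idiom this patch standardizes on for every kernel object: record where the thread blocks, block it, drop the object's corelock, then switch. A condensed sketch; my_kernel_obj only stands in for any object that has a corelock cl and a wait list queue.

    struct my_kernel_obj { struct corelock cl; struct thread_entry *queue; };

    /* Caller enters with IRQs masked and obj->cl held */
    static void object_block_current(struct my_kernel_obj *obj)
    {
        struct thread_entry *current = cores[CURRENT_CORE].running;

        IF_COP( current->obj_cl = &obj->cl; )  /* lock to retake on a forced wake */
        current->bqp = &obj->queue;            /* list a waker pulls us from */

        block_thread(current);                 /* mark blocked and enqueue */

        corelock_unlock(&obj->cl);             /* release the object... */
        switch_thread();                       /* ...and run someone else */

        /* woken: re-mask IRQs and retake the object before touching it;
           the caller restores its own saved IRQ level when finished */
        set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&obj->cl);
    }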
@@ -332,31 +755,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
332{ 755{
333 int oldlevel; 756 int oldlevel;
334 757
758#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
759 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
760 QUEUE_GET_THREAD(q) == thread_get_current(),
761 "queue_wait_w_tmo->wrong thread\n");
762#endif
763
335 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 764 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
336 corelock_lock(&q->cl); 765 corelock_lock(&q->cl);
337 766
338#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 767 /* Auto-reply */
339 if (q->send && q->send->curr_sender) 768 queue_do_auto_reply(q->send);
340 {
341 /* auto-reply */
342 queue_release_sender(&q->send->curr_sender, 0);
343 }
344#endif
345 769
346 if (q->read == q->write && ticks > 0) 770 if (q->read == q->write && ticks > 0)
347 { 771 {
348#if CONFIG_CORELOCK == CORELOCK_NONE 772 struct thread_entry *current = cores[CURRENT_CORE].running;
349#elif CONFIG_CORELOCK == SW_CORELOCK 773
350 const unsigned int core = CURRENT_CORE; 774 IF_COP( current->obj_cl = &q->cl; )
351 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 775 current->bqp = &q->queue;
352 cores[core].blk_ops.cl_p = &q->cl; 776
353#elif CONFIG_CORELOCK == CORELOCK_SWAP 777 block_thread_w_tmo(current, ticks);
354 const unsigned int core = CURRENT_CORE; 778 corelock_unlock(&q->cl);
355 cores[core].blk_ops.flags = TBOP_SET_VARu8; 779
356 cores[core].blk_ops.var_u8p = &q->cl.locked; 780 switch_thread();
357 cores[core].blk_ops.var_u8v = 0; 781
358#endif
359 block_thread_w_tmo(&q->queue, ticks);
360 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 782 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
361 corelock_lock(&q->cl); 783 corelock_lock(&q->cl);
362 } 784 }
@@ -367,14 +789,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
367 { 789 {
368 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK; 790 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
369 *ev = q->events[rd]; 791 *ev = q->events[rd];
370 792 /* Get data for a waiting thread if one */
371#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 793 queue_do_fetch_sender(q->send, rd);
372 if(q->send && q->send->senders[rd])
373 {
374 /* Get data for a waiting thread if one */
375 queue_fetch_sender(q->send, rd);
376 }
377#endif
378 } 794 }
379 else 795 else
380 { 796 {
@@ -398,18 +814,8 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
398 q->events[wr].id = id; 814 q->events[wr].id = id;
399 q->events[wr].data = data; 815 q->events[wr].data = data;
400 816
401#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 817 /* overflow protect - unblock any thread waiting at this index */
402 if(q->send) 818 queue_do_unblock_sender(q->send, wr);
403 {
404 struct thread_entry **spp = &q->send->senders[wr];
405
406 if (*spp)
407 {
408 /* overflow protect - unblock any thread waiting at this index */
409 queue_release_sender(spp, 0);
410 }
411 }
412#endif
413 819
414 /* Wakeup a waiting thread if any */ 820 /* Wakeup a waiting thread if any */
415 wakeup_thread(&q->queue); 821 wakeup_thread(&q->queue);
@@ -436,8 +842,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
436 842
437 if(q->send) 843 if(q->send)
438 { 844 {
439 const unsigned int core = CURRENT_CORE; 845 struct queue_sender_list *send = q->send;
440 struct thread_entry **spp = &q->send->senders[wr]; 846 struct thread_entry **spp = &send->senders[wr];
847 struct thread_entry *current = cores[CURRENT_CORE].running;
441 848
442 if(*spp) 849 if(*spp)
443 { 850 {
@@ -448,17 +855,20 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
448 /* Wakeup a waiting thread if any */ 855 /* Wakeup a waiting thread if any */
449 wakeup_thread(&q->queue); 856 wakeup_thread(&q->queue);
450 857
451#if CONFIG_CORELOCK == CORELOCK_NONE 858 /* Save thread in slot, add to list and wait for reply */
452#elif CONFIG_CORELOCK == SW_CORELOCK 859 *spp = current;
453 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 860 IF_COP( current->obj_cl = &q->cl; )
454 cores[core].blk_ops.cl_p = &q->cl; 861 IF_PRIO( current->blocker = q->blocker_p; )
455#elif CONFIG_CORELOCK == CORELOCK_SWAP 862 current->wakeup_ext_cb = queue_remove_sender_thread_cb;
456 cores[core].blk_ops.flags = TBOP_SET_VARu8; 863 current->retval = (intptr_t)spp;
457 cores[core].blk_ops.var_u8p = &q->cl.locked; 864 current->bqp = &send->list;
458 cores[core].blk_ops.var_u8v = 0; 865
459#endif 866 block_thread(current);
460 block_thread_no_listlock(spp); 867
461 return cores[core].running->retval; 868 corelock_unlock(&q->cl);
869 switch_thread();
870
871 return current->retval;
462 } 872 }
463 873
464 /* Function as queue_post if sending is not enabled */ 874 /* Function as queue_post if sending is not enabled */
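From the sender's side this machinery is invisible: queue_send() parks the caller on send->list, registers the removal callback and the queue's blocker for inheritance, and simply returns whatever the owner later passes to queue_reply(). A usage sketch; Q_MY_REQUEST and request_data are illustrative.

    intptr_t reply = queue_send(&my_queue, Q_MY_REQUEST, (intptr_t)&request_data);

    if (reply == 0)
    {
        /* 0 is also what an overflow bump or an auto-reply returns, so use a
           distinct nonzero value in queue_reply() for real acknowledgements */
    }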
@@ -497,37 +907,22 @@ void queue_reply(struct event_queue *q, intptr_t retval)
497{ 907{
498 if(q->send && q->send->curr_sender) 908 if(q->send && q->send->curr_sender)
499 { 909 {
500#if NUM_CORES > 1
501 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 910 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
502 corelock_lock(&q->cl); 911 corelock_lock(&q->cl);
503 /* Double-check locking */ 912 /* Double-check locking */
504 if(q->send && q->send->curr_sender) 913 IF_COP( if(q->send && q->send->curr_sender) )
505 { 914 {
506#endif
507
508 queue_release_sender(&q->send->curr_sender, retval); 915 queue_release_sender(&q->send->curr_sender, retval);
509
510#if NUM_CORES > 1
511 } 916 }
917
512 corelock_unlock(&q->cl); 918 corelock_unlock(&q->cl);
513 set_irq_level(oldlevel); 919 set_irq_level(oldlevel);
514#endif
515 } 920 }
516} 921}
517#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
518
519/* Poll queue to see if a message exists - careful in using the result if
520 * queue_remove_from_head is called when messages are posted - possibly use
521 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
522 * unsignals the queue may cause an unwanted block */
523bool queue_empty(const struct event_queue* q)
524{
525 return ( q->read == q->write );
526}
527 922
528bool queue_peek(struct event_queue *q, struct queue_event *ev) 923bool queue_peek(struct event_queue *q, struct queue_event *ev)
529{ 924{
530 if (q->read == q->write) 925 if(q->read == q->write)
531 return false; 926 return false;
532 927
533 bool have_msg = false; 928 bool have_msg = false;
@@ -535,7 +930,7 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
535 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 930 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
536 corelock_lock(&q->cl); 931 corelock_lock(&q->cl);
537 932
538 if (q->read != q->write) 933 if(q->read != q->write)
539 { 934 {
540 *ev = q->events[q->read & QUEUE_LENGTH_MASK]; 935 *ev = q->events[q->read & QUEUE_LENGTH_MASK];
541 have_msg = true; 936 have_msg = true;
@@ -546,6 +941,16 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
546 941
547 return have_msg; 942 return have_msg;
548} 943}
944#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
945
946/* Poll the queue to see if a message exists - be careful using the result if
947 * queue_remove_from_head is called when messages are posted - possibly use
948 * queue_wait_w_tmo(&q, 0) in that case, or else a removed message that
949 * unsignals the queue may cause an unwanted block */
950bool queue_empty(const struct event_queue* q)
951{
952 return ( q->read == q->write );
953}
549 954
550void queue_clear(struct event_queue* q) 955void queue_clear(struct event_queue* q)
551{ 956{
@@ -554,11 +959,9 @@ void queue_clear(struct event_queue* q)
554 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 959 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
555 corelock_lock(&q->cl); 960 corelock_lock(&q->cl);
556 961
557#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
558 /* Release all threads waiting in the queue for a reply - 962 /* Release all threads waiting in the queue for a reply -
559 dequeued sent message will be handled by owning thread */ 963 dequeued sent message will be handled by owning thread */
560 queue_release_all_senders(q); 964 queue_release_all_senders(q);
561#endif
562 965
563 q->read = 0; 966 q->read = 0;
564 q->write = 0; 967 q->write = 0;
@@ -583,18 +986,9 @@ void queue_remove_from_head(struct event_queue *q, long id)
583 break; 986 break;
584 } 987 }
585 988
586#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME 989 /* Release any thread waiting on this message */
587 if(q->send) 990 queue_do_unblock_sender(q->send, rd);
588 {
589 struct thread_entry **spp = &q->send->senders[rd];
590 991
591 if (*spp)
592 {
593 /* Release any thread waiting on this message */
594 queue_release_sender(spp, 0);
595 }
596 }
597#endif
598 q->read++; 992 q->read++;
599 } 993 }
600 994
@@ -636,397 +1030,72 @@ int queue_broadcast(long id, intptr_t data)
636} 1030}
637 1031
638/**************************************************************************** 1032/****************************************************************************
639 * Timer tick
640 ****************************************************************************/
641#if CONFIG_CPU == SH7034
642void tick_start(unsigned int interval_in_ms)
643{
644 unsigned long count;
645
646 count = CPU_FREQ * interval_in_ms / 1000 / 8;
647
648 if(count > 0x10000)
649 {
650 panicf("Error! The tick interval is too long (%d ms)\n",
651 interval_in_ms);
652 return;
653 }
654
655 /* We are using timer 0 */
656
657 TSTR &= ~0x01; /* Stop the timer */
658 TSNC &= ~0x01; /* No synchronization */
659 TMDR &= ~0x01; /* Operate normally */
660
661 TCNT0 = 0; /* Start counting at 0 */
662 GRA0 = (unsigned short)(count - 1);
663 TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
664
665 /* Enable interrupt on level 1 */
666 IPRC = (IPRC & ~0x00f0) | 0x0010;
667
668 TSR0 &= ~0x01;
669 TIER0 = 0xf9; /* Enable GRA match interrupt */
670
671 TSTR |= 0x01; /* Start timer 1 */
672}
673
674void IMIA0(void) __attribute__ ((interrupt_handler));
675void IMIA0(void)
676{
677 int i;
678
679 /* Run through the list of tick tasks */
680 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
681 {
682 if(tick_funcs[i])
683 {
684 tick_funcs[i]();
685 }
686 }
687
688 current_tick++;
689
690 TSR0 &= ~0x01;
691}
692#elif defined(CPU_COLDFIRE)
693void tick_start(unsigned int interval_in_ms)
694{
695 unsigned long count;
696 int prescale;
697
698 count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
699
700 if(count > 0x10000)
701 {
702 panicf("Error! The tick interval is too long (%d ms)\n",
703 interval_in_ms);
704 return;
705 }
706
707 prescale = cpu_frequency / CPU_FREQ;
708 /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
709 changes within timer.c */
710
711 /* We are using timer 0 */
712
713 TRR0 = (unsigned short)(count - 1); /* The reference count */
714 TCN0 = 0; /* reset the timer */
715 TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
716 /* restart, CLK/16, enabled, prescaler */
717
718 TER0 = 0xff; /* Clear all events */
719
720 ICR1 = 0x8c; /* Interrupt on level 3.0 */
721 IMR &= ~0x200;
722}
723
724void TIMER0(void) __attribute__ ((interrupt_handler));
725void TIMER0(void)
726{
727 int i;
728
729 /* Run through the list of tick tasks */
730 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
731 {
732 if(tick_funcs[i])
733 {
734 tick_funcs[i]();
735 }
736 }
737
738 current_tick++;
739
740 TER0 = 0xff; /* Clear all events */
741}
742
743#elif defined(CPU_PP)
744
745#ifndef BOOTLOADER
746void TIMER1(void)
747{
748 int i;
749
750 /* Run through the list of tick tasks (using main core) */
751 TIMER1_VAL; /* Read value to ack IRQ */
752
753 /* Run through the list of tick tasks using main CPU core -
754 wake up the COP through its control interface to provide pulse */
755 for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
756 {
757 if (tick_funcs[i])
758 {
759 tick_funcs[i]();
760 }
761 }
762
763#if NUM_CORES > 1
764 /* Pulse the COP */
765 core_wake(COP);
766#endif /* NUM_CORES */
767
768 current_tick++;
769}
770#endif
771
772/* Must be last function called init kernel/thread initialization */
773void tick_start(unsigned int interval_in_ms)
774{
775#ifndef BOOTLOADER
776 TIMER1_CFG = 0x0;
777 TIMER1_VAL;
778 /* enable timer */
779 TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
780 /* unmask interrupt source */
781 CPU_INT_EN = TIMER1_MASK;
782#else
783 /* We don't enable interrupts in the bootloader */
784 (void)interval_in_ms;
785#endif
786}
787
788#elif CONFIG_CPU == PNX0101
789
790void timer_handler(void)
791{
792 int i;
793
794 /* Run through the list of tick tasks */
795 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
796 {
797 if(tick_funcs[i])
798 tick_funcs[i]();
799 }
800
801 current_tick++;
802
803 TIMER0.clr = 0;
804}
805
806void tick_start(unsigned int interval_in_ms)
807{
808 TIMER0.ctrl &= ~0x80; /* Disable the counter */
809 TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
810 TIMER0.load = 3000000 * interval_in_ms / 1000;
811 TIMER0.ctrl &= ~0xc; /* No prescaler */
812 TIMER0.clr = 1; /* Clear the interrupt request */
813
814 irq_set_int_handler(IRQ_TIMER0, timer_handler);
815 irq_enable_int(IRQ_TIMER0);
816
817 TIMER0.ctrl |= 0x80; /* Enable the counter */
818}
819#endif
820
821int tick_add_task(void (*f)(void))
822{
823 int i;
824 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
825
826 /* Add a task if there is room */
827 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
828 {
829 if(tick_funcs[i] == NULL)
830 {
831 tick_funcs[i] = f;
832 set_irq_level(oldlevel);
833 return 0;
834 }
835 }
836 set_irq_level(oldlevel);
837 panicf("Error! tick_add_task(): out of tasks");
838 return -1;
839}
840
841int tick_remove_task(void (*f)(void))
842{
843 int i;
844 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
845
846 /* Remove a task if it is there */
847 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
848 {
849 if(tick_funcs[i] == f)
850 {
851 tick_funcs[i] = NULL;
852 set_irq_level(oldlevel);
853 return 0;
854 }
855 }
856
857 set_irq_level(oldlevel);
858 return -1;
859}
860
861/****************************************************************************
862 * Tick-based interval timers/one-shots - be mindful this is not really
863 * intended for continuous timers but for events that need to run for a short
864 * time and be cancelled without further software intervention.
865 ****************************************************************************/
866#ifdef INCLUDE_TIMEOUT_API
867static struct timeout *tmo_list = NULL; /* list of active timeout events */
868
869/* timeout tick task - calls event handlers when they expire
870 * Event handlers may alter ticks, callback and data during operation.
871 */
872static void timeout_tick(void)
873{
874 unsigned long tick = current_tick;
875 struct timeout *curr, *next;
876
877 for (curr = tmo_list; curr != NULL; curr = next)
878 {
879 next = (struct timeout *)curr->next;
880
881 if (TIME_BEFORE(tick, curr->expires))
882 continue;
883
884 /* this event has expired - call callback */
885 if (curr->callback(curr))
886 *(long *)&curr->expires = tick + curr->ticks; /* reload */
887 else
888 timeout_cancel(curr); /* cancel */
889 }
890}
891
892/* Cancels a timeout callback - can be called from the ISR */
893void timeout_cancel(struct timeout *tmo)
894{
895 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
896
897 if (tmo_list != NULL)
898 {
899 struct timeout *curr = tmo_list;
900 struct timeout *prev = NULL;
901
902 while (curr != tmo && curr != NULL)
903 {
904 prev = curr;
905 curr = (struct timeout *)curr->next;
906 }
907
908 if (curr != NULL)
909 {
910 /* in list */
911 if (prev == NULL)
912 tmo_list = (struct timeout *)curr->next;
913 else
914 *(const struct timeout **)&prev->next = curr->next;
915
916 if (tmo_list == NULL)
917 tick_remove_task(timeout_tick); /* last one - remove task */
918 }
919 /* not in list or tmo == NULL */
920 }
921
922 set_irq_level(oldlevel);
923}
924
925/* Adds a timeout callback - calling with an active timeout resets the
926 interval - can be called from the ISR */
927void timeout_register(struct timeout *tmo, timeout_cb_type callback,
928 int ticks, intptr_t data)
929{
930 int oldlevel;
931 struct timeout *curr;
932
933 if (tmo == NULL)
934 return;
935
936 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
937
938 /* see if this one is already registered */
939 curr = tmo_list;
940 while (curr != tmo && curr != NULL)
941 curr = (struct timeout *)curr->next;
942
943 if (curr == NULL)
944 {
945 /* not found - add it */
946 if (tmo_list == NULL)
947 tick_add_task(timeout_tick); /* first one - add task */
948
949 *(struct timeout **)&tmo->next = tmo_list;
950 tmo_list = tmo;
951 }
952
953 tmo->callback = callback;
954 tmo->ticks = ticks;
955 tmo->data = data;
956 *(long *)&tmo->expires = current_tick + ticks;
957
958 set_irq_level(oldlevel);
959}
960
961#endif /* INCLUDE_TIMEOUT_API */
962
963/****************************************************************************
964 * Simple mutex functions ;) 1033 * Simple mutex functions ;)
965 ****************************************************************************/ 1034 ****************************************************************************/
1035
1036/* Initialize a mutex object - call before any use and do not call again once
1037 * the object is available to other threads */
966void mutex_init(struct mutex *m) 1038void mutex_init(struct mutex *m)
967{ 1039{
1040 corelock_init(&m->cl);
968 m->queue = NULL; 1041 m->queue = NULL;
969 m->thread = NULL;
970 m->count = 0; 1042 m->count = 0;
971 m->locked = 0; 1043 m->locked = 0;
972#if CONFIG_CORELOCK == SW_CORELOCK 1044 MUTEX_SET_THREAD(m, NULL);
973 corelock_init(&m->cl); 1045#ifdef HAVE_PRIORITY_SCHEDULING
1046 m->blocker.priority = PRIORITY_IDLE;
1047 m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
1048 m->no_preempt = false;
974#endif 1049#endif
975} 1050}
976 1051
1052/* Gain ownership of a mutex object or block until it becomes free */
977void mutex_lock(struct mutex *m) 1053void mutex_lock(struct mutex *m)
978{ 1054{
979 const unsigned int core = CURRENT_CORE; 1055 const unsigned int core = CURRENT_CORE;
980 struct thread_entry *const thread = cores[core].running; 1056 struct thread_entry *current = cores[core].running;
981 1057
982 if(thread == m->thread) 1058 if(current == MUTEX_GET_THREAD(m))
983 { 1059 {
1060 /* current thread already owns this mutex */
984 m->count++; 1061 m->count++;
985 return; 1062 return;
986 } 1063 }
987 1064
988 /* Repeat some stuff here or else all the variation is too difficult to 1065 /* lock out other cores */
989 read */
990#if CONFIG_CORELOCK == CORELOCK_SWAP
991 /* peek at lock until it's no longer busy */
992 unsigned int locked;
993 while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
994 if(locked == 0)
995 {
996 m->thread = thread;
997 m->locked = 1;
998 return;
999 }
1000
1001 /* Block until the lock is open... */
1002 cores[core].blk_ops.flags = TBOP_SET_VARu8;
1003 cores[core].blk_ops.var_u8p = &m->locked;
1004 cores[core].blk_ops.var_u8v = 1;
1005#else
1006 corelock_lock(&m->cl); 1066 corelock_lock(&m->cl);
1007 if (m->locked == 0) 1067
1068 if(m->locked == 0)
1008 { 1069 {
1070 /* lock is open */
1071 MUTEX_SET_THREAD(m, current);
1009 m->locked = 1; 1072 m->locked = 1;
1010 m->thread = thread;
1011 corelock_unlock(&m->cl); 1073 corelock_unlock(&m->cl);
1012 return; 1074 return;
1013 } 1075 }
1014 1076
1015 /* Block until the lock is open... */ 1077 /* block until the lock is open... */
1016#if CONFIG_CORELOCK == SW_CORELOCK 1078 IF_COP( current->obj_cl = &m->cl; )
1017 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1079 IF_PRIO( current->blocker = &m->blocker; )
1018 cores[core].blk_ops.cl_p = &m->cl; 1080 current->bqp = &m->queue;
1019#endif 1081
1020#endif /* CONFIG_CORELOCK */ 1082 set_irq_level(HIGHEST_IRQ_LEVEL);
1083 block_thread(current);
1021 1084
1022 block_thread_no_listlock(&m->queue); 1085 corelock_unlock(&m->cl);
1086
1087 /* ...and turn control over to next thread */
1088 switch_thread();
1023} 1089}
1024 1090
1091/* Release ownership of a mutex object - only owning thread must call this */
1025void mutex_unlock(struct mutex *m) 1092void mutex_unlock(struct mutex *m)
1026{ 1093{
1027 /* unlocker not being the owner is an unlocking violation */ 1094 /* unlocker not being the owner is an unlocking violation */
1028 KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running, 1095 KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
1029 "mutex_unlock->wrong thread (recurse)"); 1096 "mutex_unlock->wrong thread (%s != %s)\n",
1097 MUTEX_GET_THREAD(m)->name,
1098 thread_get_current()->name);
1030 1099
1031 if(m->count > 0) 1100 if(m->count > 0)
1032 { 1101 {
@@ -1035,37 +1104,33 @@ void mutex_unlock(struct mutex *m)
1035 return; 1104 return;
1036 } 1105 }
1037 1106
1038#if CONFIG_CORELOCK == SW_CORELOCK
1039 /* lock out other cores */ 1107 /* lock out other cores */
1040 corelock_lock(&m->cl); 1108 corelock_lock(&m->cl);
1041#elif CONFIG_CORELOCK == CORELOCK_SWAP
1042 /* wait for peeker to move on */
1043 while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
1044#endif
1045 1109
1046 /* transfer to next queued thread if any */ 1110 /* transfer to next queued thread if any */
1047 1111 if(m->queue == NULL)
1048 /* This can become busy using SWP but is safe since only one thread
1049 will be changing things at a time. Allowing timeout waits will
1050 change that however but not now. There is also a hazard the thread
1051 could be killed before performing the wakeup but that's just
1052 irresponsible. :-) */
1053 m->thread = m->queue;
1054
1055 if(m->thread == NULL)
1056 { 1112 {
1057 m->locked = 0; /* release lock */ 1113 /* no threads waiting - open the lock */
1058#if CONFIG_CORELOCK == SW_CORELOCK 1114 MUTEX_SET_THREAD(m, NULL);
1115 m->locked = 0;
1059 corelock_unlock(&m->cl); 1116 corelock_unlock(&m->cl);
1060#endif 1117 return;
1061 } 1118 }
1062 else /* another thread is waiting - remain locked */ 1119 else
1063 { 1120 {
1064 wakeup_thread_no_listlock(&m->queue); 1121 const int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1065#if CONFIG_CORELOCK == SW_CORELOCK 1122 /* Tranfer of owning thread is handled in the wakeup protocol
1123 * if priorities are enabled otherwise just set it from the
1124 * queue head. */
1125 IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
1126 IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
1127 set_irq_level(oldlevel);
1128
1066 corelock_unlock(&m->cl); 1129 corelock_unlock(&m->cl);
1067#elif CONFIG_CORELOCK == CORELOCK_SWAP 1130
1068 m->locked = 1; 1131#ifdef HAVE_PRIORITY_SCHEDULING
1132 if((result & THREAD_SWITCH) && !m->no_preempt)
1133 switch_thread();
1069#endif 1134#endif
1070 } 1135 }
1071} 1136}
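
For orientation, here is a minimal usage sketch of the mutex API exercised by the hunks above. It is not code from the patch; mutex_init() and the struct mutex declaration are assumed to come from kernel.h as usual, and struct item is a made-up example type.

/* Minimal sketch, assuming kernel.h provides struct mutex and
 * mutex_init/mutex_lock/mutex_unlock; struct item is illustrative. */
struct item { struct item *next; };

static struct mutex list_mutex;
static struct item *list_head;

void list_init(void)
{
    mutex_init(&list_mutex);
}

void list_push(struct item *it)
{
    mutex_lock(&list_mutex);    /* blocks and switches away if already owned */
    it->next = list_head;
    list_head = it;
    mutex_unlock(&list_mutex);  /* hands ownership to the next waiter, if any */
}
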
@@ -1083,28 +1148,32 @@ void spinlock_init(struct spinlock *l)
1083 1148
1084void spinlock_lock(struct spinlock *l) 1149void spinlock_lock(struct spinlock *l)
1085{ 1150{
1086 struct thread_entry *const thread = cores[CURRENT_CORE].running; 1151 const unsigned int core = CURRENT_CORE;
1152 struct thread_entry *current = cores[core].running;
1087 1153
1088 if (l->thread == thread) 1154 if(l->thread == current)
1089 { 1155 {
1156 /* current core already owns it */
1090 l->count++; 1157 l->count++;
1091 return; 1158 return;
1092 } 1159 }
1093 1160
1161 /* lock against other processor cores */
1094 corelock_lock(&l->cl); 1162 corelock_lock(&l->cl);
1095 1163
1096 l->thread = thread; 1164 /* take ownership */
1165 l->thread = current;
1097} 1166}
1098 1167
1099void spinlock_unlock(struct spinlock *l) 1168void spinlock_unlock(struct spinlock *l)
1100{ 1169{
1101 /* unlocker not being the owner is an unlocking violation */ 1170 /* unlocker not being the owner is an unlocking violation */
1102 KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running, 1171 KERNEL_ASSERT(l->thread == thread_get_current(),
1103 "spinlock_unlock->wrong thread"); 1172 "spinlock_unlock->wrong thread\n");
1104 1173
1105 if (l->count > 0) 1174 if(l->count > 0)
1106 { 1175 {
1107 /* this thread still owns lock */ 1176 /* this core still owns lock */
1108 l->count--; 1177 l->count--;
1109 return; 1178 return;
1110 } 1179 }
@@ -1124,76 +1193,62 @@ void spinlock_unlock(struct spinlock *l)
1124void semaphore_init(struct semaphore *s, int max, int start) 1193void semaphore_init(struct semaphore *s, int max, int start)
1125{ 1194{
1126 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, 1195 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
1127 "semaphore_init->inv arg"); 1196 "semaphore_init->inv arg\n");
1128 s->queue = NULL; 1197 s->queue = NULL;
1129 s->max = max; 1198 s->max = max;
1130 s->count = start; 1199 s->count = start;
1131#if CONFIG_CORELOCK == SW_CORELOCK
1132 corelock_init(&s->cl); 1200 corelock_init(&s->cl);
1133#endif
1134} 1201}
1135 1202
1136void semaphore_wait(struct semaphore *s) 1203void semaphore_wait(struct semaphore *s)
1137{ 1204{
1138#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1205 struct thread_entry *current;
1206
1139 corelock_lock(&s->cl); 1207 corelock_lock(&s->cl);
1208
1140 if(--s->count >= 0) 1209 if(--s->count >= 0)
1141 { 1210 {
1211 /* wait satisfied */
1142 corelock_unlock(&s->cl); 1212 corelock_unlock(&s->cl);
1143 return; 1213 return;
1144 } 1214 }
1145#elif CONFIG_CORELOCK == CORELOCK_SWAP
1146 int count;
1147 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1148 if(--count >= 0)
1149 {
1150 s->count = count;
1151 return;
1152 }
1153#endif
1154 1215
1155 /* too many waits - block until dequeued */ 1216 /* too many waits - block until dequeued... */
1156#if CONFIG_CORELOCK == SW_CORELOCK 1217 current = cores[CURRENT_CORE].running;
1157 const unsigned int core = CURRENT_CORE; 1218
1158 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1219 IF_COP( current->obj_cl = &s->cl; )
1159 cores[core].blk_ops.cl_p = &s->cl; 1220 current->bqp = &s->queue;
1160#elif CONFIG_CORELOCK == CORELOCK_SWAP 1221
1161 const unsigned int core = CURRENT_CORE; 1222 set_irq_level(HIGHEST_IRQ_LEVEL);
1162 cores[core].blk_ops.flags = TBOP_SET_VARi; 1223 block_thread(current);
1163 cores[core].blk_ops.var_ip = &s->count; 1224
1164 cores[core].blk_ops.var_iv = count; 1225 corelock_unlock(&s->cl);
1165#endif 1226
1166 block_thread_no_listlock(&s->queue); 1227 /* ...and turn control over to next thread */
1228 switch_thread();
1167} 1229}
1168 1230
1169void semaphore_release(struct semaphore *s) 1231void semaphore_release(struct semaphore *s)
1170{ 1232{
1171#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1233 IF_PRIO( unsigned int result = THREAD_NONE; )
1234
1172 corelock_lock(&s->cl); 1235 corelock_lock(&s->cl);
1173 if (s->count < s->max)
1174 {
1175 if (++s->count <= 0)
1176 {
1177#elif CONFIG_CORELOCK == CORELOCK_SWAP
1178 int count;
1179 while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
1180 if(count < s->max)
1181 {
1182 if(++count <= 0)
1183 {
1184#endif /* CONFIG_CORELOCK */
1185 1236
1186 /* there should be threads in this queue */ 1237 if(s->count < s->max && ++s->count <= 0)
1187 KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup"); 1238 {
1188 /* a thread was queued - wake it up */ 1239 /* there should be threads in this queue */
1189 wakeup_thread_no_listlock(&s->queue); 1240 KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
1190 } 1241 /* a thread was queued - wake it up */
1242 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1243 IF_PRIO( result = ) wakeup_thread(&s->queue);
1244 set_irq_level(oldlevel);
1191 } 1245 }
1192 1246
1193#if CONFIG_CORELOCK == SW_CORELOCK
1194 corelock_unlock(&s->cl); 1247 corelock_unlock(&s->cl);
1195#elif CONFIG_CORELOCK == CORELOCK_SWAP 1248
1196 s->count = count; 1249#ifdef HAVE_PRIORITY_SCHEDULING
1250 if(result & THREAD_SWITCH)
1251 switch_thread();
1197#endif 1252#endif
1198} 1253}
1199#endif /* HAVE_SEMAPHORE_OBJECTS */ 1254#endif /* HAVE_SEMAPHORE_OBJECTS */
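
The semaphore calls above follow the usual counting-semaphore pattern: semaphore_wait() decrements and blocks once the count goes negative, semaphore_release() increments up to the configured maximum and wakes one waiter. A hedged usage sketch, assuming the kernel.h declarations and a build with HAVE_SEMAPHORE_OBJECTS:

/* Minimal sketch only; the surrounding queueing code is illustrative. */
static struct semaphore items_avail;

void queue_setup(void)
{
    /* at most 4 outstanding items, none available at start */
    semaphore_init(&items_avail, 4, 0);
}

void producer_side(void)
{
    /* ... enqueue an item ... */
    semaphore_release(&items_avail);  /* wakes one waiter if any are blocked */
}

void consumer_side(void)
{
    semaphore_wait(&items_avail);     /* blocks while the count is exhausted */
    /* ... dequeue and process the item ... */
}
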
@@ -1208,117 +1263,107 @@ void event_init(struct event *e, unsigned int flags)
1208 e->queues[STATE_SIGNALED] = NULL; 1263 e->queues[STATE_SIGNALED] = NULL;
1209 e->state = flags & STATE_SIGNALED; 1264 e->state = flags & STATE_SIGNALED;
1210 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0; 1265 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
1211#if CONFIG_CORELOCK == SW_CORELOCK
1212 corelock_init(&e->cl); 1266 corelock_init(&e->cl);
1213#endif
1214} 1267}
1215 1268
1216void event_wait(struct event *e, unsigned int for_state) 1269void event_wait(struct event *e, unsigned int for_state)
1217{ 1270{
1218 unsigned int last_state; 1271 struct thread_entry *current;
1219#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1272
1220 corelock_lock(&e->cl); 1273 corelock_lock(&e->cl);
1221 last_state = e->state;
1222#elif CONFIG_CORELOCK == CORELOCK_SWAP
1223 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1224#endif
1225 1274
1226 if(e->automatic != 0) 1275 if(e->automatic != 0)
1227 { 1276 {
1228 /* wait for false always satisfied by definition 1277 /* wait for false always satisfied by definition
1229 or if it just changed to false */ 1278 or if it just changed to false */
1230 if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED) 1279 if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
1231 { 1280 {
1232 /* automatic - unsignal */ 1281 /* automatic - unsignal */
1233 e->state = STATE_NONSIGNALED; 1282 e->state = STATE_NONSIGNALED;
1234#if CONFIG_CORELOCK == SW_CORELOCK
1235 corelock_unlock(&e->cl); 1283 corelock_unlock(&e->cl);
1236#endif
1237 return; 1284 return;
1238 } 1285 }
1239 /* block until state matches */ 1286 /* block until state matches */
1240 } 1287 }
1241 else if(for_state == last_state) 1288 else if(for_state == e->state)
1242 { 1289 {
1243 /* the state being waited for is the current state */ 1290 /* the state being waited for is the current state */
1244#if CONFIG_CORELOCK == SW_CORELOCK
1245 corelock_unlock(&e->cl); 1291 corelock_unlock(&e->cl);
1246#elif CONFIG_CORELOCK == CORELOCK_SWAP
1247 e->state = last_state;
1248#endif
1249 return; 1292 return;
1250 } 1293 }
1251 1294
1252 { 1295 /* block until state matches what callers requests */
1253 /* current state does not match wait-for state */ 1296 current = cores[CURRENT_CORE].running;
1254#if CONFIG_CORELOCK == SW_CORELOCK 1297
1255 const unsigned int core = CURRENT_CORE; 1298 IF_COP( current->obj_cl = &e->cl; )
1256 cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK; 1299 current->bqp = &e->queues[for_state];
1257 cores[core].blk_ops.cl_p = &e->cl; 1300
1258#elif CONFIG_CORELOCK == CORELOCK_SWAP 1301 set_irq_level(HIGHEST_IRQ_LEVEL);
1259 const unsigned int core = CURRENT_CORE; 1302 block_thread(current);
1260 cores[core].blk_ops.flags = TBOP_SET_VARu8; 1303
1261 cores[core].blk_ops.var_u8p = &e->state; 1304 corelock_unlock(&e->cl);
1262 cores[core].blk_ops.var_u8v = last_state; 1305
1263#endif 1306 /* turn control over to next thread */
1264 block_thread_no_listlock(&e->queues[for_state]); 1307 switch_thread();
1265 }
1266} 1308}
1267 1309
1268void event_set_state(struct event *e, unsigned int state) 1310void event_set_state(struct event *e, unsigned int state)
1269{ 1311{
1270 unsigned int last_state; 1312 unsigned int result;
1271#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK 1313 int oldlevel;
1314
1272 corelock_lock(&e->cl); 1315 corelock_lock(&e->cl);
1273 last_state = e->state;
1274#elif CONFIG_CORELOCK == CORELOCK_SWAP
1275 while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
1276#endif
1277 1316
1278 if(last_state == state) 1317 if(e->state == state)
1279 { 1318 {
1280 /* no change */ 1319 /* no change */
1281#if CONFIG_CORELOCK == SW_CORELOCK
1282 corelock_unlock(&e->cl); 1320 corelock_unlock(&e->cl);
1283#elif CONFIG_CORELOCK == CORELOCK_SWAP
1284 e->state = last_state;
1285#endif
1286 return; 1321 return;
1287 } 1322 }
1288 1323
1324 IF_PRIO( result = THREAD_OK; )
1325
1326 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1327
1289 if(state == STATE_SIGNALED) 1328 if(state == STATE_SIGNALED)
1290 { 1329 {
1291 if(e->automatic != 0) 1330 if(e->automatic != 0)
1292 { 1331 {
1293 struct thread_entry *thread; 1332 /* no thread should have ever blocked for nonsignaled */
1294 /* no thread should have ever blocked for unsignaled */
1295 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL, 1333 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
1296 "set_event_state->queue[NS]:S"); 1334 "set_event_state->queue[NS]:S\n");
1297 /* pass to next thread and keep unsignaled - "pulse" */ 1335 /* pass to next thread and keep unsignaled - "pulse" */
1298 thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]); 1336 result = wakeup_thread(&e->queues[STATE_SIGNALED]);
1299 e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED; 1337 e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
1300 } 1338 }
1301 else 1339 else
1302 { 1340 {
1303 /* release all threads waiting for signaled */ 1341 /* release all threads waiting for signaled */
1304 thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
1305 e->state = STATE_SIGNALED; 1342 e->state = STATE_SIGNALED;
1343 IF_PRIO( result = )
1344 thread_queue_wake(&e->queues[STATE_SIGNALED]);
1306 } 1345 }
1307 } 1346 }
1308 else 1347 else
1309 { 1348 {
1310 /* release all threads waiting for unsignaled */ 1349 /* release all threads waiting for nonsignaled */
1311 1350
1312 /* no thread should have ever blocked if automatic */ 1351 /* no thread should have ever blocked if automatic */
1313 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL || 1352 KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
1314 e->automatic == 0, "set_event_state->queue[NS]:NS"); 1353 e->automatic == 0, "set_event_state->queue[NS]:NS\n");
1315 1354
1316 thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
1317 e->state = STATE_NONSIGNALED; 1355 e->state = STATE_NONSIGNALED;
1356 IF_PRIO( result = )
1357 thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
1318 } 1358 }
1319 1359
1320#if CONFIG_CORELOCK == SW_CORELOCK 1360 set_irq_level(oldlevel);
1361
1321 corelock_unlock(&e->cl); 1362 corelock_unlock(&e->cl);
1363
1364#ifdef HAVE_PRIORITY_SCHEDULING
1365 if(result & THREAD_SWITCH)
1366 switch_thread();
1322#endif 1367#endif
1323} 1368}
1324#endif /* HAVE_EVENT_OBJECTS */ 1369#endif /* HAVE_EVENT_OBJECTS */
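
The event object above supports both manual and automatic ("pulse") semantics: with EVENT_AUTOMATIC set, signaling passes the event to a single waiter and returns it to nonsignaled. A hedged usage sketch, assuming the kernel.h declarations and a build with HAVE_EVENT_OBJECTS:

/* Minimal sketch only; thread roles are illustrative. */
static struct event data_ready;

void notifier_setup(void)
{
    /* auto-reset event, initially nonsignaled */
    event_init(&data_ready, EVENT_AUTOMATIC);
}

void waiter_loop(void)
{
    for (;;)
    {
        /* blocks until a pulse arrives, or returns at once if signaled */
        event_wait(&data_ready, STATE_SIGNALED);
        /* ... consume whatever was produced ... */
    }
}

void notifier_signal(void)
{
    /* passes the signal to one waiter if any ("pulse"); otherwise the
     * event stays signaled until the next wait */
    event_set_state(&data_ready, STATE_SIGNALED);
}
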
diff --git a/firmware/pcm_record.c b/firmware/pcm_record.c
index 1437b438ba..49da257c08 100644
--- a/firmware/pcm_record.c
+++ b/firmware/pcm_record.c
@@ -361,11 +361,12 @@ unsigned long pcm_rec_sample_rate(void)
361void pcm_rec_init(void) 361void pcm_rec_init(void)
362{ 362{
363 queue_init(&pcmrec_queue, true); 363 queue_init(&pcmrec_queue, true);
364 queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send);
365 pcmrec_thread_p = 364 pcmrec_thread_p =
366 create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack), 365 create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack),
367 0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING) 366 0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
368 IF_COP(, CPU)); 367 IF_COP(, CPU));
368 queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send,
369 pcmrec_thread_p);
369} /* pcm_rec_init */ 370} /* pcm_rec_init */
370 371
371/** audio_* group **/ 372/** audio_* group **/
@@ -874,9 +875,9 @@ static void pcmrec_flush(unsigned flush_num)
874 logf("pcmrec: boost (%s)", 875 logf("pcmrec: boost (%s)",
875 num >= flood_watermark ? "num" : "time"); 876 num >= flood_watermark ? "num" : "time");
876 prio_pcmrec = thread_set_priority(NULL, 877 prio_pcmrec = thread_set_priority(NULL,
877 thread_get_priority(NULL) - 1); 878 thread_get_priority(NULL) - 4);
878 prio_codec = thread_set_priority(codec_thread_p, 879 prio_codec = thread_set_priority(codec_thread_p,
879 thread_get_priority(codec_thread_p) - 1); 880 thread_get_priority(codec_thread_p) - 4);
880 } 881 }
881#endif 882#endif
882 883
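
The hunk above raises the recording and codec thread priorities by four steps instead of one, saving the old values for later restoration. A hedged sketch of the save/boost/restore pattern this implies; the helper names are illustrative, not part of the patch:

/* Sketch only; thread_set_priority() appears to return the previous
 * priority, which is kept so it can be restored later. */
static int saved_prio;

static void boost_thread(struct thread_entry *t)
{
    saved_prio = thread_set_priority(t, thread_get_priority(t) - 4);
}

static void unboost_thread(struct thread_entry *t)
{
    thread_set_priority(t, saved_prio);
}
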
diff --git a/firmware/target/arm/ffs-arm.S b/firmware/target/arm/ffs-arm.S
new file mode 100644
index 0000000000..bb888ab558
--- /dev/null
+++ b/firmware/target/arm/ffs-arm.S
@@ -0,0 +1,74 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2008 by Michael Sevakis
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19 #include "config.h"
20
21/****************************************************************************
22 * int find_first_set_bit(uint32_t val);
23 *
24 * Find the index of the least significant set bit in the 32-bit word.
25 *
26 * return values:
27 * 0 - bit 0 is set
28 * 1 - bit 1 is set
29 * ...
30 * 31 - bit 31 is set
31 * 32 - no bits set
32 ****************************************************************************/
33 .align 2
34 .global find_first_set_bit
35 .type find_first_set_bit,%function
36find_first_set_bit:
37 @ Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry
38 rsb r2, r0, #0 @ r1 = r0 & -r0
39 ands r1, r0, r2 @
40
41 @ now r1 has at most one set bit, call this X
42
43#if ARM_ARCH >= 5
44 clz r0, r1 @ Get lead 0's count
45 rsbne r0, r0, #31 @ lead 0's -> bit index
46 bx lr @
47#else
48 @ this is the ffs algorithm devised by D.Seal and posted to
49 @ comp.sys.arm on 16 Feb 1994.
50 @
51 @ Output modified to suit Rockbox purposes.
52
53 adr r2, L_ffs_table
54 orrne r1, r1, r1, lsl #4 @ r1 = X * 0x11
55 orrne r1, r1, r1, lsl #6 @ r1 = X * 0x451
56 rsbne r1, r1, r1, lsl #16 @ r1 = X * 0x0450fbaf
57
58 @ now lookup in table indexed on top 6 bits of r1
59 ldrb r0, [ r2, r1, lsr #26 ] @
60 bx lr @
61
62L_ffs_table:
63 @ 0 1 2 3 4 5 6 7
64 @----------------------------------------------
65 .byte 32, 0, 1, 12, 2, 6, 0, 13 @ 0- 7
66 .byte 3, 0, 7, 0, 0, 0, 0, 14 @ 8-15
67 .byte 10, 4, 0, 0, 8, 0, 0, 25 @ 16-23
68 .byte 0, 0, 0, 0, 0, 21, 27, 15 @ 24-31
69 .byte 31, 11, 5, 0, 0, 0, 0, 0 @ 32-39
70 .byte 9, 0, 0, 24, 0, 0, 20, 26 @ 40-47
71 .byte 30, 0, 0, 0, 0, 23, 0, 19 @ 48-55
72 .byte 29, 0, 22, 18, 28, 17, 16, 0 @ 56-63
73#endif
74 .size find_first_set_bit, .-find_first_set_bit
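
For readers following the pre-ARMv5 path, the isolate-bottom-bit, multiply and table-lookup trick can be rendered in C roughly as follows. This is a sketch for illustration, not the shipped implementation; the 64-entry table is copied verbatim from L_ffs_table above and the return convention matches the header comment (32 means no bits set).

#include <stdint.h>

static const unsigned char ffs_table[64] =
{
    32,  0,  1, 12,  2,  6,  0, 13,   3,  0,  7,  0,  0,  0,  0, 14,
    10,  4,  0,  0,  8,  0,  0, 25,   0,  0,  0,  0,  0, 21, 27, 15,
    31, 11,  5,  0,  0,  0,  0,  0,   9,  0,  0, 24,  0,  0, 20, 26,
    30,  0,  0,  0,  0, 23,  0, 19,  29,  0, 22, 18, 28, 17, 16,  0,
};

/* Same contract as documented above: index of the least significant set
 * bit, or 32 if no bits are set. */
int find_first_set_bit_ref(uint32_t val)
{
    uint32_t x = val & -val;                   /* isolate the lowest set bit */
    return ffs_table[(x * 0x0450fbafu) >> 26]; /* top 6 bits index the table */
}
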
diff --git a/firmware/target/arm/i2c-pp.c b/firmware/target/arm/i2c-pp.c
index 092a59be84..1cfbfaeff1 100644
--- a/firmware/target/arm/i2c-pp.c
+++ b/firmware/target/arm/i2c-pp.c
@@ -45,7 +45,7 @@ static int pp_i2c_wait_not_busy(void)
45 if (!(I2C_STATUS & I2C_BUSY)) { 45 if (!(I2C_STATUS & I2C_BUSY)) {
46 return 0; 46 return 0;
47 } 47 }
48 priority_yield(); 48 yield();
49 } 49 }
50 50
51 return -1; 51 return -1;
diff --git a/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c b/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
index 1f5c5c8fbe..f5d37baf5f 100644
--- a/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
+++ b/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
@@ -128,7 +128,7 @@ void copy_read_sectors(unsigned char* buf, int wordcount)
128 128
129 /* Wait for transfer to complete */ 129 /* Wait for transfer to complete */
130 while((DSTAT0 & 0x000fffff)) 130 while((DSTAT0 & 0x000fffff))
131 priority_yield(); 131 yield();
132 /* Dump cache for the buffer */ 132 /* Dump cache for the buffer */
133} 133}
134#endif 134#endif
diff --git a/firmware/target/arm/sandisk/ata-c200_e200.c b/firmware/target/arm/sandisk/ata-c200_e200.c
index c1c0cb3e8c..747cb17ca1 100644
--- a/firmware/target/arm/sandisk/ata-c200_e200.c
+++ b/firmware/target/arm/sandisk/ata-c200_e200.c
@@ -198,7 +198,7 @@ static bool sd_poll_status(unsigned int trigger, long timeout)
198 if (TIME_AFTER(time, next_yield)) 198 if (TIME_AFTER(time, next_yield))
199 { 199 {
200 long ty = USEC_TIMER; 200 long ty = USEC_TIMER;
201 priority_yield(); 201 yield();
202 timeout += USEC_TIMER - ty; 202 timeout += USEC_TIMER - ty;
203 next_yield = ty + MIN_YIELD_PERIOD; 203 next_yield = ty + MIN_YIELD_PERIOD;
204 } 204 }
@@ -317,7 +317,7 @@ static int sd_wait_for_state(unsigned int state, int id)
317 us = USEC_TIMER; 317 us = USEC_TIMER;
318 if (TIME_AFTER(us, next_yield)) 318 if (TIME_AFTER(us, next_yield))
319 { 319 {
320 priority_yield(); 320 yield();
321 timeout += USEC_TIMER - us; 321 timeout += USEC_TIMER - us;
322 next_yield = us + MIN_YIELD_PERIOD; 322 next_yield = us + MIN_YIELD_PERIOD;
323 } 323 }
diff --git a/firmware/target/coldfire/ffs-coldfire.S b/firmware/target/coldfire/ffs-coldfire.S
new file mode 100644
index 0000000000..4f21013123
--- /dev/null
+++ b/firmware/target/coldfire/ffs-coldfire.S
@@ -0,0 +1,62 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2008 by Michael Sevakis
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19 #include "config.h"
20
21/****************************************************************************
22 * int find_first_set_bit(uint32_t val);
23 *
24 * Find the index of the least significant set bit in the 32-bit word.
25 *
26 * return values:
27 * 0 - bit 0 is set
28 * 1 - bit 1 is set
29 * ...
30 * 31 - bit 31 is set
31 * 32 - no bits set
32 ****************************************************************************/
33 .text
34 .align 2
35 .global find_first_set_bit
36 .type find_first_set_bit,@function
37find_first_set_bit:
38 | this is a coldfire version of the ffs algorithm devised by D.Seal
39 | and posted to comp.sys.arm on 16 Feb 1994.
40 |
41 | Output modified to suit rockbox purposes.
42
 43 | Standard trick to isolate bottom bit in %d1 or 0 if the argument is 0
44 move.l 4(%sp), %d1 | %d1 = %d1 & -%d1
45 lea.l L_ffs_table, %a0 | %a0 = table address
46 move.l %d1, %d0 |
47 neg.l %d1 |
48 and.l %d0, %d1 |
49
50 | now %d1 has at most one set bit, call this X
51
52 move.l #0x0450fbaf, %d0 | %d0 = multiplier
53 mulu.l %d0, %d1 | %d1 = X * 0x0450fbaf
54
55 | now lookup in table indexed on top 6 bits of %d0
56 moveq.l #26, %d0 | %d0 = final shift count
57 lsr.l %d0, %d1 |
58 clr.l %d0 |
59 move.b (%a0, %d1.l), %d0 |
60 rts |
61
62 .size find_first_set_bit, .-find_first_set_bit
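
As a hypothetical illustration of why a fast find_first_set_bit is useful to a priority scheduler, a bitmap of runnable priorities can be scanned in constant time. The names below are illustrative only and are not the scheduler's actual data structures:

#include <stdint.h>

/* Provided per target by the assembly above (or a generic C fallback). */
int find_first_set_bit(uint32_t val);

/* Illustrative only: one bit per priority level, bit 0 = most urgent. */
static uint32_t runnable_prio_mask;

static int best_runnable_priority(void)
{
    /* constant-time scan; returns 32 when nothing is runnable */
    return find_first_set_bit(runnable_prio_mask);
}
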
diff --git a/firmware/thread.c b/firmware/thread.c
index 8bebfedbf5..259a66a652 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -28,6 +28,10 @@
28#ifdef RB_PROFILE 28#ifdef RB_PROFILE
29#include <profile.h> 29#include <profile.h>
30#endif 30#endif
31/****************************************************************************
32 * ATTENTION!! *
33 * See notes below on implementing processor-specific portions! *
34 ***************************************************************************/
31 35
32/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */ 36/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
33#ifdef DEBUG 37#ifdef DEBUG
@@ -59,9 +63,7 @@
59 * event queues. The kernel object must have a scheme to protect itself from 63 * event queues. The kernel object must have a scheme to protect itself from
60 * access by another processor and is responsible for serializing the calls 64 * access by another processor and is responsible for serializing the calls
61 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each 65 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
62 * other. If a thread blocks on an object it must fill-in the blk_ops members 66 * other. Objects' queues are also protected here.
63 * for its core to unlock _after_ the thread's context has been saved and the
64 * unlocking will be done in reverse from this heirarchy.
65 * 67 *
66 * 3) Thread Slot 68 * 3) Thread Slot
67 * This locks access to the thread's slot such that its state cannot be 69 * This locks access to the thread's slot such that its state cannot be
@@ -70,70 +72,66 @@
70 * a thread while it is still blocking will likely desync its state with 72 * a thread while it is still blocking will likely desync its state with
71 * the other resources used for that state. 73 * the other resources used for that state.
72 * 74 *
73 * 4) Lists 75 * 4) Core Lists
74 * Usually referring to a list (aka. queue) that a thread will be blocking
75 * on that belongs to some object and is shareable amongst multiple
76 * processors. Parts of the scheduler may have access to them without actually
77 * locking the kernel object such as when a thread is blocked with a timeout
78 * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
79 * it lists locked when the thread blocks so that all object list access is
80 * synchronized. Failure to do so would corrupt the list links.
81 *
82 * 5) Core Lists
83 * These lists are specific to a particular processor core and are accessible 76 * These lists are specific to a particular processor core and are accessible
84 * by all processor cores and interrupt handlers. They are used when an 77 * by all processor cores and interrupt handlers. The running (rtr) list is
85 * operation may only be performed by the thread's own core in a normal 78 * the prime example where a thread may be added by any means.
86 * execution context. The wakeup list is the prime example where a thread 79 */
87 * may be added by any means and the thread's own core will remove it from
88 * the wakeup list and put it on the running list (which is only ever
89 * accessible by its own processor).
90 */
91#define DEADBEEF ((unsigned int)0xdeadbeef)
92/* Cast to the the machine int type, whose size could be < 4. */
93struct core_entry cores[NUM_CORES] IBSS_ATTR;
94struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
95
96static const char main_thread_name[] = "main";
97extern int stackbegin[];
98extern int stackend[];
99 80
100/* core_sleep procedure to implement for any CPU to ensure an asychronous wakup 81/*---------------------------------------------------------------------------
101 * never results in requiring a wait until the next tick (up to 10000uS!). May 82 * Processor specific: core_sleep/core_wake/misc. notes
102 * require assembly and careful instruction ordering. 83 *
84 * ARM notes:
85 * FIQ is not dealt with by the scheduler code and is simply restored if it
 86 * must be masked for some reason - because threading modifies a register
87 * that FIQ may also modify and there's no way to accomplish it atomically.
88 * s3c2440 is such a case.
89 *
 90 * Audio interrupts are generally treated at a higher priority than others;
 91 * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 92 * is not in general safe. Special cases may be constructed on a per-
93 * source basis and blocking operations are not available.
94 *
 95 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 96 * wakeup never results in requiring a wait until the next tick (up to
97 * 10000uS!). May require assembly and careful instruction ordering.
103 * 98 *
104 * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4. 99 * 1) On multicore, stay awake if directed to do so by another. If so, goto
105 * 2) If processor requires, atomically reenable interrupts and perform step 3. 100 * step 4.
106 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire) 101 * 2) If processor requires, atomically reenable interrupts and perform step
107 * goto step 5. 102 * 3.
103 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
104 * on Coldfire) goto step 5.
108 * 4) Enable interrupts. 105 * 4) Enable interrupts.
109 * 5) Exit procedure. 106 * 5) Exit procedure.
107 *
 108 * core_wake and multiprocessor notes for sleep/wake coordination:
109 * If possible, to wake up another processor, the forcing of an interrupt on
110 * the woken core by the waker core is the easiest way to ensure a non-
111 * delayed wake and immediate execution of any woken threads. If that isn't
 112 * available then some careful non-blocking synchronization is needed (as on
113 * PP targets at the moment).
114 *---------------------------------------------------------------------------
110 */ 115 */
111static inline void core_sleep(IF_COP_VOID(unsigned int core))
112 __attribute__((always_inline));
113
114static void check_tmo_threads(void)
115 __attribute__((noinline));
116 116
 117static inline void block_thread_on_l( 117/* Cast to the machine pointer size, whose size could be < 4 or > 32
118 struct thread_queue *list, struct thread_entry *thread, unsigned state) 118 * (someday :). */
119 __attribute__((always_inline)); 119#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
120struct core_entry cores[NUM_CORES] IBSS_ATTR;
121struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
120 122
121static inline void block_thread_on_l_no_listlock( 123static const char main_thread_name[] = "main";
122 struct thread_entry **list, struct thread_entry *thread, unsigned state) 124extern uintptr_t stackbegin[];
123 __attribute__((always_inline)); 125extern uintptr_t stackend[];
124 126
125static inline void _block_thread_on_l( 127static inline void core_sleep(IF_COP_VOID(unsigned int core))
126 struct thread_queue *list, struct thread_entry *thread,
127 unsigned state IF_SWCL(, const bool single))
128 __attribute__((always_inline)); 128 __attribute__((always_inline));
129 129
130IF_SWCL(static inline) struct thread_entry * _wakeup_thread( 130void check_tmo_threads(void)
131 struct thread_queue *list IF_SWCL(, const bool nolock)) 131 __attribute__((noinline));
132 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
133 132
134IF_SWCL(static inline) void _block_thread( 133static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
135 struct thread_queue *list IF_SWCL(, const bool nolock)) 134 __attribute__((always_inline));
136 __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
137 135
138static void add_to_list_tmo(struct thread_entry *thread) 136static void add_to_list_tmo(struct thread_entry *thread)
139 __attribute__((noinline)); 137 __attribute__((noinline));
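
Before the per-CPU implementations further down, here is a rough C shape of the core_sleep contract laid out in the notes above (steps 1 to 5). wakeup_pending() and cpu_sleep_enabling_irq() are hypothetical placeholders, not functions from the patch; the real versions are CPU-specific assembly.

/* Sketch only, assuming the helpers named above stand in for the
 * target-specific pieces. */
static inline void core_sleep_sketch(IF_COP_VOID(unsigned int core))
{
#if NUM_CORES > 1
    if (wakeup_pending(core))        /* step 1: stay awake if told to */
    {
        set_irq_level(IRQ_ENABLED);  /* step 4: enable interrupts */
        return;                      /* step 5: exit */
    }
#endif
    /* steps 2-3: atomically reenable interrupts and sleep the core; on
     * targets whose wakeup already enables interrupts this is the end */
    cpu_sleep_enabling_irq();
}
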
@@ -141,9 +139,6 @@ static void add_to_list_tmo(struct thread_entry *thread)
141static void core_schedule_wakeup(struct thread_entry *thread) 139static void core_schedule_wakeup(struct thread_entry *thread)
142 __attribute__((noinline)); 140 __attribute__((noinline));
143 141
144static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
145 __attribute__((always_inline));
146
147#if NUM_CORES > 1 142#if NUM_CORES > 1
148static inline void run_blocking_ops( 143static inline void run_blocking_ops(
149 unsigned int core, struct thread_entry *thread) 144 unsigned int core, struct thread_entry *thread)
@@ -159,10 +154,9 @@ static inline void store_context(void* addr)
159static inline void load_context(const void* addr) 154static inline void load_context(const void* addr)
160 __attribute__((always_inline)); 155 __attribute__((always_inline));
161 156
162void switch_thread(struct thread_entry *old) 157void switch_thread(void)
163 __attribute__((noinline)); 158 __attribute__((noinline));
164 159
165
166/**************************************************************************** 160/****************************************************************************
167 * Processor-specific section 161 * Processor-specific section
168 */ 162 */
@@ -172,8 +166,7 @@ void switch_thread(struct thread_entry *old)
172 * Start the thread running and terminate it if it returns 166 * Start the thread running and terminate it if it returns
173 *--------------------------------------------------------------------------- 167 *---------------------------------------------------------------------------
174 */ 168 */
175static void start_thread(void) __attribute__((naked,used)); 169static void __attribute__((naked,used)) start_thread(void)
176static void start_thread(void)
177{ 170{
178 /* r0 = context */ 171 /* r0 = context */
179 asm volatile ( 172 asm volatile (
@@ -188,19 +181,18 @@ static void start_thread(void)
188#endif 181#endif
189 "mov lr, pc \n" /* Call thread function */ 182 "mov lr, pc \n" /* Call thread function */
190 "bx r4 \n" 183 "bx r4 \n"
191 "mov r0, #0 \n" /* remove_thread(NULL) */
192 "ldr pc, =remove_thread \n"
193 ".ltorg \n" /* Dump constant pool */
194 ); /* No clobber list - new thread doesn't care */ 184 ); /* No clobber list - new thread doesn't care */
185 thread_exit();
186 //asm volatile (".ltorg"); /* Dump constant pool */
195} 187}
196 188
197/* For startup, place context pointer in r4 slot, start_thread pointer in r5 189/* For startup, place context pointer in r4 slot, start_thread pointer in r5
198 * slot, and thread function pointer in context.start. See load_context for 190 * slot, and thread function pointer in context.start. See load_context for
199 * what happens when thread is initially going to run. */ 191 * what happens when thread is initially going to run. */
200#define THREAD_STARTUP_INIT(core, thread, function) \ 192#define THREAD_STARTUP_INIT(core, thread, function) \
201 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ 193 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
202 (thread)->context.r[1] = (unsigned int)start_thread, \ 194 (thread)->context.r[1] = (uint32_t)start_thread, \
203 (thread)->context.start = (void *)function; }) 195 (thread)->context.start = (uint32_t)function; })
204 196
205/*--------------------------------------------------------------------------- 197/*---------------------------------------------------------------------------
206 * Store non-volatile context. 198 * Store non-volatile context.
@@ -232,11 +224,11 @@ static inline void load_context(const void* addr)
232#if defined (CPU_PP) 224#if defined (CPU_PP)
233 225
234#if NUM_CORES > 1 226#if NUM_CORES > 1
235extern int cpu_idlestackbegin[]; 227extern uintptr_t cpu_idlestackbegin[];
236extern int cpu_idlestackend[]; 228extern uintptr_t cpu_idlestackend[];
237extern int cop_idlestackbegin[]; 229extern uintptr_t cop_idlestackbegin[];
238extern int cop_idlestackend[]; 230extern uintptr_t cop_idlestackend[];
239static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR = 231static uintptr_t * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
240{ 232{
241 [CPU] = cpu_idlestackbegin, 233 [CPU] = cpu_idlestackbegin,
242 [COP] = cop_idlestackbegin 234 [COP] = cop_idlestackbegin
@@ -253,7 +245,7 @@ struct core_semaphores
253}; 245};
254 246
255static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR; 247static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
256#endif 248#endif /* CONFIG_CPU == PP5002 */
257 249
258#endif /* NUM_CORES */ 250#endif /* NUM_CORES */
259 251
@@ -401,15 +393,15 @@ void corelock_unlock(struct corelock *cl)
401 * no other core requested a wakeup for it to perform a task. 393 * no other core requested a wakeup for it to perform a task.
402 *--------------------------------------------------------------------------- 394 *---------------------------------------------------------------------------
403 */ 395 */
396#ifdef CPU_PP502x
404#if NUM_CORES == 1 397#if NUM_CORES == 1
405/* Shared single-core build debugging version */
406static inline void core_sleep(void) 398static inline void core_sleep(void)
407{ 399{
408 PROC_CTL(CURRENT_CORE) = PROC_SLEEP; 400 PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
409 nop; nop; nop; 401 nop; nop; nop;
410 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 402 set_irq_level(IRQ_ENABLED);
411} 403}
412#elif defined (CPU_PP502x) 404#else
413static inline void core_sleep(unsigned int core) 405static inline void core_sleep(unsigned int core)
414{ 406{
415#if 1 407#if 1
@@ -429,8 +421,8 @@ static inline void core_sleep(unsigned int core)
429 "ldr r1, [%[mbx], #0] \n" 421 "ldr r1, [%[mbx], #0] \n"
430 "tst r1, r0, lsr #2 \n" 422 "tst r1, r0, lsr #2 \n"
431 "bne 1b \n" 423 "bne 1b \n"
432 "mrs r1, cpsr \n" /* Enable interrupts */ 424 "mrs r1, cpsr \n" /* Enable IRQ */
433 "bic r1, r1, #0xc0 \n" 425 "bic r1, r1, #0x80 \n"
434 "msr cpsr_c, r1 \n" 426 "msr cpsr_c, r1 \n"
435 : 427 :
436 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core) 428 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
@@ -452,11 +444,36 @@ static inline void core_sleep(unsigned int core)
452 /* Wait for other processor to finish wake procedure */ 444 /* Wait for other processor to finish wake procedure */
453 while (MBX_MSG_STAT & (0x1 << core)); 445 while (MBX_MSG_STAT & (0x1 << core));
454 446
455 /* Enable IRQ, FIQ */ 447 /* Enable IRQ */
456 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 448 set_irq_level(IRQ_ENABLED);
457#endif /* ASM/C selection */ 449#endif /* ASM/C selection */
458} 450}
451#endif /* NUM_CORES */
459#elif CONFIG_CPU == PP5002 452#elif CONFIG_CPU == PP5002
453#if NUM_CORES == 1
454static inline void core_sleep(void)
455{
456 asm volatile (
457 /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
458 * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
459 * that the correct alternative is executed. Don't change the order
460 * of the next 4 instructions! */
461 "tst pc, #0x0c \n"
462 "mov r0, #0xca \n"
463 "strne r0, [%[ctl]] \n"
464 "streq r0, [%[ctl]] \n"
465 "nop \n" /* nop's needed because of pipeline */
466 "nop \n"
467 "nop \n"
468 "mrs r0, cpsr \n" /* Enable IRQ */
469 "bic r0, r0, #0x80 \n"
470 "msr cpsr_c, r0 \n"
471 :
472 : [ctl]"r"(&PROC_CTL(CURRENT_CORE))
473 : "r0"
474 );
475}
476#else
460/* PP5002 has no mailboxes - emulate using bytes */ 477/* PP5002 has no mailboxes - emulate using bytes */
461static inline void core_sleep(unsigned int core) 478static inline void core_sleep(unsigned int core)
462{ 479{
@@ -486,8 +503,8 @@ static inline void core_sleep(unsigned int core)
486 "ldrb r0, [%[sem], #0] \n" 503 "ldrb r0, [%[sem], #0] \n"
487 "cmp r0, #0 \n" 504 "cmp r0, #0 \n"
488 "bne 1b \n" 505 "bne 1b \n"
489 "mrs r0, cpsr \n" /* Enable interrupts */ 506 "mrs r0, cpsr \n" /* Enable IRQ */
490 "bic r0, r0, #0xc0 \n" 507 "bic r0, r0, #0x80 \n"
491 "msr cpsr_c, r0 \n" 508 "msr cpsr_c, r0 \n"
492 : 509 :
493 : [sem]"r"(&core_semaphores[core]), [c]"r"(core), 510 : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
@@ -512,11 +529,12 @@ static inline void core_sleep(unsigned int core)
512 /* Wait for other processor to finish wake procedure */ 529 /* Wait for other processor to finish wake procedure */
513 while (core_semaphores[core].intend_wake != 0); 530 while (core_semaphores[core].intend_wake != 0);
514 531
515 /* Enable IRQ, FIQ */ 532 /* Enable IRQ */
516 set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS); 533 set_irq_level(IRQ_ENABLED);
517#endif /* ASM/C selection */ 534#endif /* ASM/C selection */
518} 535}
519#endif /* CPU type */ 536#endif /* NUM_CORES */
537#endif /* PP CPU type */
520 538
521/*--------------------------------------------------------------------------- 539/*---------------------------------------------------------------------------
522 * Wake another processor core that is sleeping or prevent it from doing so 540 * Wake another processor core that is sleeping or prevent it from doing so
@@ -553,7 +571,7 @@ void core_wake(unsigned int othercore)
553 "strne r1, [%[ctl], %[oc], lsl #2] \n" 571 "strne r1, [%[ctl], %[oc], lsl #2] \n"
554 "mov r1, r2, lsr #4 \n" 572 "mov r1, r2, lsr #4 \n"
555 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */ 573 "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
556 "msr cpsr_c, r3 \n" /* Restore int status */ 574 "msr cpsr_c, r3 \n" /* Restore IRQ */
557 : 575 :
558 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), 576 : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
559 [oc]"r"(othercore) 577 [oc]"r"(othercore)
@@ -604,7 +622,7 @@ void core_wake(unsigned int othercore)
604 "strne r1, [r2, %[oc], lsl #2] \n" 622 "strne r1, [r2, %[oc], lsl #2] \n"
605 "mov r1, #0 \n" /* Done with wake procedure */ 623 "mov r1, #0 \n" /* Done with wake procedure */
606 "strb r1, [%[sem], #0] \n" 624 "strb r1, [%[sem], #0] \n"
607 "msr cpsr_c, r3 \n" /* Restore int status */ 625 "msr cpsr_c, r3 \n" /* Restore IRQ */
608 : 626 :
609 : [sem]"r"(&core_semaphores[othercore]), 627 : [sem]"r"(&core_semaphores[othercore]),
610 [st]"r"(&PROC_STAT), 628 [st]"r"(&PROC_STAT),
@@ -640,8 +658,8 @@ void core_wake(unsigned int othercore)
640 * 658 *
641 * Needed when a thread suicides on a core other than the main CPU since the 659 * Needed when a thread suicides on a core other than the main CPU since the
642 * stack used when idling is the stack of the last thread to run. This stack 660 * stack used when idling is the stack of the last thread to run. This stack
643 * may not reside in the core in which case the core will continue to use a 661 * may not reside in the core firmware in which case the core will continue
644 * stack from an unloaded module until another thread runs on it. 662 * to use a stack from an unloaded module until another thread runs on it.
645 *--------------------------------------------------------------------------- 663 *---------------------------------------------------------------------------
646 */ 664 */
647static inline void switch_to_idle_stack(const unsigned int core) 665static inline void switch_to_idle_stack(const unsigned int core)
@@ -670,11 +688,11 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
670 /* Flush our data to ram */ 688 /* Flush our data to ram */
671 flush_icache(); 689 flush_icache();
672 /* Stash thread in r4 slot */ 690 /* Stash thread in r4 slot */
673 thread->context.r[0] = (unsigned int)thread; 691 thread->context.r[0] = (uint32_t)thread;
674 /* Stash restart address in r5 slot */ 692 /* Stash restart address in r5 slot */
675 thread->context.r[1] = (unsigned int)thread->context.start; 693 thread->context.r[1] = thread->context.start;
676 /* Save sp in context.sp while still running on old core */ 694 /* Save sp in context.sp while still running on old core */
677 thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1]; 695 thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
678} 696}
679 697
680/*--------------------------------------------------------------------------- 698/*---------------------------------------------------------------------------
@@ -689,9 +707,8 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
689/*--------------------------------------------------------------------------- 707/*---------------------------------------------------------------------------
690 * This actually performs the core switch. 708 * This actually performs the core switch.
691 */ 709 */
692static void switch_thread_core(unsigned int core, struct thread_entry *thread) 710static void __attribute__((naked))
693 __attribute__((naked)); 711 switch_thread_core(unsigned int core, struct thread_entry *thread)
694static void switch_thread_core(unsigned int core, struct thread_entry *thread)
695{ 712{
696 /* Pure asm for this because compiler behavior isn't sufficiently predictable. 713 /* Pure asm for this because compiler behavior isn't sufficiently predictable.
697 * Stack access also isn't permitted until restoring the original stack and 714 * Stack access also isn't permitted until restoring the original stack and
@@ -705,7 +722,6 @@ static void switch_thread_core(unsigned int core, struct thread_entry *thread)
705 "mov sp, r2 \n" /* switch stacks */ 722 "mov sp, r2 \n" /* switch stacks */
706 "adr r2, 1f \n" /* r2 = new core restart address */ 723 "adr r2, 1f \n" /* r2 = new core restart address */
707 "str r2, [r1, #40] \n" /* thread->context.start = r2 */ 724 "str r2, [r1, #40] \n" /* thread->context.start = r2 */
708 "mov r0, r1 \n" /* switch_thread(thread) */
709 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */ 725 "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
710 "1: \n" 726 "1: \n"
711 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */ 727 "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
@@ -733,13 +749,15 @@ static inline void core_sleep(void)
733 /* FIQ also changes the CLKCON register so FIQ must be disabled 749 /* FIQ also changes the CLKCON register so FIQ must be disabled
734 when changing it here */ 750 when changing it here */
735 asm volatile ( 751 asm volatile (
736 "mrs r0, cpsr \n" /* Prepare IRQ, FIQ enable */ 752 "mrs r0, cpsr \n"
737 "bic r0, r0, #0xc0 \n" 753 "orr r2, r0, #0x40 \n" /* Disable FIQ */
754 "bic r0, r0, #0x80 \n" /* Prepare IRQ enable */
755 "msr cpsr_c, r2 \n"
738 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */ 756 "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
739 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */ 757 "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
740 "orr r2, r2, #4 \n" 758 "orr r2, r2, #4 \n"
741 "str r2, [r1, #0xc] \n" 759 "str r2, [r1, #0xc] \n"
742 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */ 760 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
743 "mov r2, #0 \n" /* wait for IDLE */ 761 "mov r2, #0 \n" /* wait for IDLE */
744 "1: \n" 762 "1: \n"
745 "add r2, r2, #1 \n" 763 "add r2, r2, #1 \n"
@@ -750,13 +768,14 @@ static inline void core_sleep(void)
750 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */ 768 "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
751 "bic r2, r2, #4 \n" 769 "bic r2, r2, #4 \n"
752 "str r2, [r1, #0xc] \n" 770 "str r2, [r1, #0xc] \n"
753 "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */ 771 "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
754 : : : "r0", "r1", "r2"); 772 : : : "r0", "r1", "r2");
755} 773}
756#elif defined(CPU_TCC77X) 774#elif defined(CPU_TCC77X)
757static inline void core_sleep(void) 775static inline void core_sleep(void)
758{ 776{
759 #warning TODO: Implement core_sleep 777 #warning TODO: Implement core_sleep
778 set_irq_level(IRQ_ENABLED);
760} 779}
761#elif defined(CPU_TCC780X) 780#elif defined(CPU_TCC780X)
762static inline void core_sleep(void) 781static inline void core_sleep(void)
@@ -765,8 +784,8 @@ static inline void core_sleep(void)
765 asm volatile ( 784 asm volatile (
766 "mov r0, #0 \n" 785 "mov r0, #0 \n"
767 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */ 786 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
768 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */ 787 "mrs r0, cpsr \n" /* Unmask IRQ at core level */
769 "bic r0, r0, #0xc0 \n" 788 "bic r0, r0, #0x80 \n"
770 "msr cpsr_c, r0 \n" 789 "msr cpsr_c, r0 \n"
771 : : : "r0" 790 : : : "r0"
772 ); 791 );
@@ -777,8 +796,8 @@ static inline void core_sleep(void)
777 asm volatile ( 796 asm volatile (
778 "mov r0, #0 \n" 797 "mov r0, #0 \n"
779 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */ 798 "mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
780 "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */ 799 "mrs r0, cpsr \n" /* Unmask IRQ at core level */
781 "bic r0, r0, #0xc0 \n" 800 "bic r0, r0, #0x80 \n"
782 "msr cpsr_c, r0 \n" 801 "msr cpsr_c, r0 \n"
783 : : : "r0" 802 : : : "r0"
784 ); 803 );
@@ -787,6 +806,7 @@ static inline void core_sleep(void)
787static inline void core_sleep(void) 806static inline void core_sleep(void)
788{ 807{
789 #warning core_sleep not implemented, battery life will be decreased 808 #warning core_sleep not implemented, battery life will be decreased
809 set_irq_level(0);
790} 810}
791#endif /* CONFIG_CPU == */ 811#endif /* CONFIG_CPU == */
792 812
@@ -796,8 +816,7 @@ static inline void core_sleep(void)
796 *--------------------------------------------------------------------------- 816 *---------------------------------------------------------------------------
797 */ 817 */
798void start_thread(void); /* Provide C access to ASM label */ 818void start_thread(void); /* Provide C access to ASM label */
799static void __start_thread(void) __attribute__((used)); 819static void __attribute__((used)) __start_thread(void)
800static void __start_thread(void)
801{ 820{
802 /* a0=macsr, a1=context */ 821 /* a0=macsr, a1=context */
803 asm volatile ( 822 asm volatile (
@@ -808,9 +827,8 @@ static void __start_thread(void)
808 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */ 827 "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
809 "clr.l (%a1) \n" /* Mark thread running */ 828 "clr.l (%a1) \n" /* Mark thread running */
810 "jsr (%a2) \n" /* Call thread function */ 829 "jsr (%a2) \n" /* Call thread function */
811 "clr.l -(%sp) \n" /* remove_thread(NULL) */
812 "jsr remove_thread \n"
813 ); 830 );
831 thread_exit();
814} 832}
815 833
816/* Set EMAC unit to fractional mode with saturation for each new thread, 834/* Set EMAC unit to fractional mode with saturation for each new thread,
@@ -823,9 +841,9 @@ static void __start_thread(void)
823 */ 841 */
824#define THREAD_STARTUP_INIT(core, thread, function) \ 842#define THREAD_STARTUP_INIT(core, thread, function) \
825 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \ 843 ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
826 (thread)->context.d[0] = (unsigned int)&(thread)->context, \ 844 (thread)->context.d[0] = (uint32_t)&(thread)->context, \
827 (thread)->context.d[1] = (unsigned int)start_thread, \ 845 (thread)->context.d[1] = (uint32_t)start_thread, \
828 (thread)->context.start = (void *)(function); }) 846 (thread)->context.start = (uint32_t)(function); })
829 847
830/*--------------------------------------------------------------------------- 848/*---------------------------------------------------------------------------
831 * Store non-volatile context. 849 * Store non-volatile context.
@@ -874,8 +892,7 @@ static inline void core_sleep(void)
874 *--------------------------------------------------------------------------- 892 *---------------------------------------------------------------------------
875 */ 893 */
876void start_thread(void); /* Provide C access to ASM label */ 894void start_thread(void); /* Provide C access to ASM label */
877static void __start_thread(void) __attribute__((used)); 895static void __attribute__((used)) __start_thread(void)
878static void __start_thread(void)
879{ 896{
880 /* r8 = context */ 897 /* r8 = context */
881 asm volatile ( 898 asm volatile (
@@ -885,20 +902,16 @@ static void __start_thread(void)
885 "mov #0, r1 \n" /* Start the thread */ 902 "mov #0, r1 \n" /* Start the thread */
886 "jsr @r0 \n" 903 "jsr @r0 \n"
887 "mov.l r1, @(36, r8) \n" /* Clear start address */ 904 "mov.l r1, @(36, r8) \n" /* Clear start address */
888 "mov.l 1f, r0 \n" /* remove_thread(NULL) */
889 "jmp @r0 \n"
890 "mov #0, r4 \n"
891 "1: \n"
892 ".long _remove_thread \n"
893 ); 905 );
906 thread_exit();
894} 907}
895 908
896/* Place context pointer in r8 slot, function pointer in r9 slot, and 909/* Place context pointer in r8 slot, function pointer in r9 slot, and
897 * start_thread pointer in context_start */ 910 * start_thread pointer in context_start */
898#define THREAD_STARTUP_INIT(core, thread, function) \ 911#define THREAD_STARTUP_INIT(core, thread, function) \
899 ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \ 912 ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
900 (thread)->context.r[1] = (unsigned int)(function), \ 913 (thread)->context.r[1] = (uint32_t)(function), \
901 (thread)->context.start = (void*)start_thread; }) 914 (thread)->context.start = (uint32_t)start_thread; })
902 915
903/*--------------------------------------------------------------------------- 916/*---------------------------------------------------------------------------
904 * Store non-volatile context. 917 * Store non-volatile context.
@@ -947,7 +960,7 @@ static inline void load_context(const void* addr)
947} 960}
948 961
949/*--------------------------------------------------------------------------- 962/*---------------------------------------------------------------------------
950 * Put core in a power-saving state if waking list wasn't repopulated. 963 * Put core in a power-saving state.
951 *--------------------------------------------------------------------------- 964 *---------------------------------------------------------------------------
952 */ 965 */
953static inline void core_sleep(void) 966static inline void core_sleep(void)
@@ -969,9 +982,7 @@ static inline void core_sleep(void)
969#if THREAD_EXTRA_CHECKS 982#if THREAD_EXTRA_CHECKS
970static void thread_panicf(const char *msg, struct thread_entry *thread) 983static void thread_panicf(const char *msg, struct thread_entry *thread)
971{ 984{
972#if NUM_CORES > 1 985 IF_COP( const unsigned int core = thread->core; )
973 const unsigned int core = thread->core;
974#endif
975 static char name[32]; 986 static char name[32];
976 thread_get_name(name, 32, thread); 987 thread_get_name(name, 32, thread);
977 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core)); 988 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
@@ -987,9 +998,7 @@ static void thread_stkov(struct thread_entry *thread)
987#else 998#else
988static void thread_stkov(struct thread_entry *thread) 999static void thread_stkov(struct thread_entry *thread)
989{ 1000{
990#if NUM_CORES > 1 1001 IF_COP( const unsigned int core = thread->core; )
991 const unsigned int core = thread->core;
992#endif
993 static char name[32]; 1002 static char name[32];
994 thread_get_name(name, 32, thread); 1003 thread_get_name(name, 32, thread);
995 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core)); 1004 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
@@ -998,111 +1007,67 @@ static void thread_stkov(struct thread_entry *thread)
998#define THREAD_ASSERT(exp, msg, thread) 1007#define THREAD_ASSERT(exp, msg, thread)
999#endif /* THREAD_EXTRA_CHECKS */ 1008#endif /* THREAD_EXTRA_CHECKS */
1000 1009
1001/*---------------------------------------------------------------------------
1002 * Lock a list pointer and returns its value
1003 *---------------------------------------------------------------------------
1004 */
1005#if CONFIG_CORELOCK == SW_CORELOCK
1006/* Separate locking function versions */
1007
1008/* Thread locking */ 1010/* Thread locking */
1009#define GET_THREAD_STATE(thread) \ 1011#if NUM_CORES > 1
1010 ({ corelock_lock(&(thread)->cl); (thread)->state; }) 1012#define LOCK_THREAD(thread) \
1011#define TRY_GET_THREAD_STATE(thread) \ 1013 ({ corelock_lock(&(thread)->slot_cl); })
1012 ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; }) 1014#define TRY_LOCK_THREAD(thread) \
1013#define UNLOCK_THREAD(thread, state) \ 1015 ({ corelock_try_lock(&thread->slot_cl); })
1014 ({ corelock_unlock(&(thread)->cl); }) 1016#define UNLOCK_THREAD(thread) \
1015#define UNLOCK_THREAD_SET_STATE(thread, _state) \ 1017 ({ corelock_unlock(&(thread)->slot_cl); })
1016 ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); }) 1018#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
1017 1019 ({ unsigned int _core = (thread)->core; \
1018/* List locking */ 1020 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
1019#define LOCK_LIST(tqp) \ 1021 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
1020 ({ corelock_lock(&(tqp)->cl); (tqp)->queue; }) 1022#else
1021#define UNLOCK_LIST(tqp, mod) \ 1023#define LOCK_THREAD(thread) \
1022 ({ corelock_unlock(&(tqp)->cl); }) 1024 ({ })
1023#define UNLOCK_LIST_SET_PTR(tqp, mod) \ 1025#define TRY_LOCK_THREAD(thread) \
1024 ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); }) 1026 ({ })
1025 1027#define UNLOCK_THREAD(thread) \
1026/* Select the queue pointer directly */ 1028 ({ })
1027#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \ 1029#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
1028 ({ add_to_list_l(&(tqp)->queue, (thread)); }) 1030 ({ })
1029#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \ 1031#endif
1030 ({ remove_from_list_l(&(tqp)->queue, (thread)); }) 1032
1031 1033/* RTR list */
1032#elif CONFIG_CORELOCK == CORELOCK_SWAP 1034#define RTR_LOCK(core) \
1033/* Native swap/exchange versions */ 1035 ({ corelock_lock(&cores[core].rtr_cl); })
1036#define RTR_UNLOCK(core) \
1037 ({ corelock_unlock(&cores[core].rtr_cl); })
1034 1038
1035/* Thread locking */ 1039#ifdef HAVE_PRIORITY_SCHEDULING
1036#define GET_THREAD_STATE(thread) \ 1040#define rtr_add_entry(core, priority) \
1037 ({ unsigned _s; \ 1041 prio_add_entry(&cores[core].rtr, (priority))
1038 while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
1039 _s; })
1040#define TRY_GET_THREAD_STATE(thread) \
1041 ({ xchg8(&(thread)->state, STATE_BUSY); })
1042#define UNLOCK_THREAD(thread, _state) \
1043 ({ (thread)->state = (_state); })
1044#define UNLOCK_THREAD_SET_STATE(thread, _state) \
1045 ({ (thread)->state = (_state); })
1046
1047/* List locking */
1048#define LOCK_LIST(tqp) \
1049 ({ struct thread_entry *_l; \
1050 while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
1051 _l; })
1052#define UNLOCK_LIST(tqp, mod) \
1053 ({ (tqp)->queue = (mod); })
1054#define UNLOCK_LIST_SET_PTR(tqp, mod) \
1055 ({ (tqp)->queue = (mod); })
1056
1057/* Select the local queue pointer copy returned from LOCK_LIST */
1058#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1059 ({ add_to_list_l(&(tc), (thread)); })
1060#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1061 ({ remove_from_list_l(&(tc), (thread)); })
1062 1042
1043#define rtr_subtract_entry(core, priority) \
1044 prio_subtract_entry(&cores[core].rtr, (priority))
1045
1046#define rtr_move_entry(core, from, to) \
1047 prio_move_entry(&cores[core].rtr, (from), (to))
1063#else 1048#else
1064/* Single-core/non-locked versions */ 1049#define rtr_add_entry(core, priority)
1065 1050#define rtr_add_entry_inl(core, priority)
1066/* Threads */ 1051#define rtr_subtract_entry(core, priority)
1067#define GET_THREAD_STATE(thread) \ 1052#define rtr_subtract_entry_inl(core, priotity)
1068 ({ (thread)->state; }) 1053#define rtr_move_entry(core, from, to)
1069#define UNLOCK_THREAD(thread, _state) 1054#define rtr_move_entry_inl(core, from, to)
1070#define UNLOCK_THREAD_SET_STATE(thread, _state) \ 1055#endif
1071 ({ (thread)->state = (_state); })
1072
1073/* Lists */
1074#define LOCK_LIST(tqp) \
1075 ({ (tqp)->queue; })
1076#define UNLOCK_LIST(tqp, mod)
1077#define UNLOCK_LIST_SET_PTR(tqp, mod) \
1078 ({ (tqp)->queue = (mod); })
1079
1080/* Select the queue pointer directly */
1081#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
1082 ({ add_to_list_l(&(tqp)->queue, (thread)); })
1083#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
1084 ({ remove_from_list_l(&(tqp)->queue, (thread)); })
1085
1086#endif /* locking selection */
1087 1056
1088#if THREAD_EXTRA_CHECKS
1089/*--------------------------------------------------------------------------- 1057/*---------------------------------------------------------------------------
1090 * Lock the thread slot to obtain the state and then unlock it. Waits for 1058 * Thread list structure - circular:
1091 * it not to be busy. Used for debugging. 1059 * +------------------------------+
1060 * | |
1061 * +--+---+<-+---+<-+---+<-+---+<-+
1062 * Head->| T | | T | | T | | T |
1063 * +->+---+->+---+->+---+->+---+--+
1064 * | |
1065 * +------------------------------+
1092 *--------------------------------------------------------------------------- 1066 *---------------------------------------------------------------------------
1093 */ 1067 */
1094static unsigned peek_thread_state(struct thread_entry *thread)
1095{
1096 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
1097 unsigned state = GET_THREAD_STATE(thread);
1098 UNLOCK_THREAD(thread, state);
1099 set_irq_level(oldlevel);
1100 return state;
1101}
1102#endif /* THREAD_EXTRA_CHECKS */
1103 1068
1104/*--------------------------------------------------------------------------- 1069/*---------------------------------------------------------------------------
1105 * Adds a thread to a list of threads using "intert last". Uses the "l" 1070 * Adds a thread to a list of threads using "insert last". Uses the "l"
1106 * links. 1071 * links.
1107 *--------------------------------------------------------------------------- 1072 *---------------------------------------------------------------------------
1108 */ 1073 */
@@ -1114,44 +1079,18 @@ static void add_to_list_l(struct thread_entry **list,
1114 if (l == NULL) 1079 if (l == NULL)
1115 { 1080 {
1116 /* Insert into unoccupied list */ 1081 /* Insert into unoccupied list */
1117 thread->l.next = thread;
1118 thread->l.prev = thread; 1082 thread->l.prev = thread;
1083 thread->l.next = thread;
1119 *list = thread; 1084 *list = thread;
1120 return; 1085 return;
1121 } 1086 }
1122 1087
1123 /* Insert last */ 1088 /* Insert last */
1124 thread->l.next = l;
1125 thread->l.prev = l->l.prev; 1089 thread->l.prev = l->l.prev;
1126 thread->l.prev->l.next = thread; 1090 thread->l.next = l;
1091 l->l.prev->l.next = thread;
1127 l->l.prev = thread; 1092 l->l.prev = thread;
1128
1129 /* Insert next
1130 thread->l.next = l->l.next;
1131 thread->l.prev = l;
1132 thread->l.next->l.prev = thread;
1133 l->l.next = thread;
1134 */
1135}
1136
1137/*---------------------------------------------------------------------------
1138 * Locks a list, adds the thread entry and unlocks the list on multicore.
1139 * Defined as add_to_list_l on single-core.
1140 *---------------------------------------------------------------------------
1141 */
1142#if NUM_CORES > 1
1143static void add_to_list_l_locked(struct thread_queue *tq,
1144 struct thread_entry *thread)
1145{
1146 struct thread_entry *t = LOCK_LIST(tq);
1147 ADD_TO_LIST_L_SELECT(t, tq, thread);
1148 UNLOCK_LIST(tq, t);
1149 (void)t;
1150} 1093}
1151#else
1152#define add_to_list_l_locked(tq, thread) \
1153 add_to_list_l(&(tq)->queue, (thread))
1154#endif
1155 1094
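As an aside on the circular layout diagrammed above: the snippet below is a standalone toy (struct node and its fields are invented for the example, not the kernel's thread_entry) showing the insert-last operation and the usual do/while traversal that stops once it arrives back at the head, which is the same pattern find_highest_priority_in_list_l uses further down.

#include <stdio.h>

/* Toy node mirroring the "l" links; the real struct thread_entry carries
 * much more state. */
struct node {
    struct node *prev, *next;
    int priority;
};

/* Insert last: the new node becomes head->prev, as add_to_list_l does. */
static void insert_last(struct node **head, struct node *n)
{
    if (*head == NULL) {
        n->prev = n->next = n;
        *head = n;
        return;
    }
    n->prev = (*head)->prev;
    n->next = *head;
    (*head)->prev->next = n;
    (*head)->prev = n;
}

int main(void)
{
    struct node a = { .priority = 10 }, b = { .priority = 5 }, c = { .priority = 20 };
    struct node *head = NULL;
    insert_last(&head, &a);
    insert_last(&head, &b);
    insert_last(&head, &c);

    /* Circular traversal: do/while until we are back at the head. */
    int best = head->priority;
    struct node *curr = head;
    do {
        if (curr->priority < best)   /* lower number = higher priority */
            best = curr->priority;
        curr = curr->next;
    } while (curr != head);

    printf("highest priority in list: %d\n", best);  /* prints 5 */
    return 0;
}
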
1156/*--------------------------------------------------------------------------- 1095/*---------------------------------------------------------------------------
1157 * Removes a thread from a list of threads. Uses the "l" links. 1096 * Removes a thread from a list of threads. Uses the "l" links.
@@ -1180,28 +1119,20 @@ static void remove_from_list_l(struct thread_entry **list,
1180 prev = thread->l.prev; 1119 prev = thread->l.prev;
1181 1120
1182 /* Fix links to jump over the removed entry. */ 1121 /* Fix links to jump over the removed entry. */
1183 prev->l.next = next;
1184 next->l.prev = prev; 1122 next->l.prev = prev;
1123 prev->l.next = next;
1185} 1124}
1186 1125
1187/*--------------------------------------------------------------------------- 1126/*---------------------------------------------------------------------------
1188 * Locks a list, removes the thread entry and unlocks the list on multicore. 1127 * Timeout list structure - circular reverse (to make "remove item" O(1)),
1189 * Defined as remove_from_list_l on single-core. 1128 * NULL-terminated forward (to ease the far more common forward traversal):
1129 * +------------------------------+
1130 * | |
1131 * +--+---+<-+---+<-+---+<-+---+<-+
1132 * Head->| T | | T | | T | | T |
1133 * +---+->+---+->+---+->+---+-X
1190 *--------------------------------------------------------------------------- 1134 *---------------------------------------------------------------------------
1191 */ 1135 */
1192#if NUM_CORES > 1
1193static void remove_from_list_l_locked(struct thread_queue *tq,
1194 struct thread_entry *thread)
1195{
1196 struct thread_entry *t = LOCK_LIST(tq);
1197 REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
1198 UNLOCK_LIST(tq, t);
1199 (void)t;
1200}
1201#else
1202#define remove_from_list_l_locked(tq, thread) \
1203 remove_from_list_l(&(tq)->queue, (thread))
1204#endif
1205 1136
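A small standalone illustration of the timeout-list shape described a few lines up (toy types, not the real thread slots): keeping the prev links circular while next stays NULL-terminated means the tail is always head->prev, so inserting last and unlinking the tail never require walking the list.

#include <assert.h>
#include <stddef.h>

struct tmo_node {
    struct tmo_node *prev;  /* circular: head->prev is the tail */
    struct tmo_node *next;  /* NULL-terminated forward direction */
};

static void tmo_insert_last(struct tmo_node **head, struct tmo_node *n)
{
    n->next = NULL;
    if (*head == NULL) {
        n->prev = n;          /* single node: it is its own tail */
        *head = n;
        return;
    }
    n->prev = (*head)->prev;  /* old tail */
    (*head)->prev->next = n;
    (*head)->prev = n;        /* new tail reachable from head in O(1) */
}

int main(void)
{
    struct tmo_node a, b, c;
    struct tmo_node *head = NULL;
    tmo_insert_last(&head, &a);
    tmo_insert_last(&head, &b);
    tmo_insert_last(&head, &c);

    assert(head == &a);
    assert(head->prev == &c);        /* tail found without traversal */
    assert(c.next == NULL);          /* forward walk ends at the tail */
    assert(a.next == &b && b.next == &c);
    return 0;
}
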
1206/*--------------------------------------------------------------------------- 1137/*---------------------------------------------------------------------------
1207 * Add a thread to the core's timeout list by linking the pointers in its 1138 * Add a thread to the core's timeout list by linking the pointers in its
@@ -1210,19 +1141,24 @@ static void remove_from_list_l_locked(struct thread_queue *tq,
1210 */ 1141 */
1211static void add_to_list_tmo(struct thread_entry *thread) 1142static void add_to_list_tmo(struct thread_entry *thread)
1212{ 1143{
1213 /* Insert first */ 1144 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
1214 struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout; 1145 THREAD_ASSERT(thread->tmo.prev == NULL,
1146 "add_to_list_tmo->already listed", thread);
1215 1147
1216 thread->tmo.prev = thread; 1148 thread->tmo.next = NULL;
1217 thread->tmo.next = t;
1218 1149
1219 if (t != NULL) 1150 if (tmo == NULL)
1220 { 1151 {
1221 /* Fix second item's prev pointer to point to this thread */ 1152 /* Insert into unoccupied list */
1222 t->tmo.prev = thread; 1153 thread->tmo.prev = thread;
1154 cores[IF_COP_CORE(thread->core)].timeout = thread;
1155 return;
1223 } 1156 }
1224 1157
1225 cores[IF_COP_CORE(thread->core)].timeout = thread; 1158 /* Insert Last */
1159 thread->tmo.prev = tmo->tmo.prev;
1160 tmo->tmo.prev->tmo.next = thread;
1161 tmo->tmo.prev = thread;
1226} 1162}
1227 1163
1228/*--------------------------------------------------------------------------- 1164/*---------------------------------------------------------------------------
@@ -1233,91 +1169,520 @@ static void add_to_list_tmo(struct thread_entry *thread)
1233 */ 1169 */
1234static void remove_from_list_tmo(struct thread_entry *thread) 1170static void remove_from_list_tmo(struct thread_entry *thread)
1235{ 1171{
1172 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
1173 struct thread_entry *prev = thread->tmo.prev;
1236 struct thread_entry *next = thread->tmo.next; 1174 struct thread_entry *next = thread->tmo.next;
1237 struct thread_entry *prev;
1238 1175
1239 if (thread == cores[IF_COP_CORE(thread->core)].timeout) 1176 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
1177
1178 if (next != NULL)
1179 next->tmo.prev = prev;
1180
1181 if (thread == *list)
1182 {
1183 /* List becomes next item and empty if next == NULL */
1184 *list = next;
1185 /* Mark as unlisted */
1186 thread->tmo.prev = NULL;
1187 }
1188 else
1189 {
1190 if (next == NULL)
1191 (*list)->tmo.prev = prev;
1192 prev->tmo.next = next;
1193 /* Mark as unlisted */
1194 thread->tmo.prev = NULL;
1195 }
1196}
1197
1198
1199#ifdef HAVE_PRIORITY_SCHEDULING
1200/*---------------------------------------------------------------------------
1201 * Priority distribution structure (one category for each possible priority):
1202 *
1203 * +----+----+----+ ... +-----+
1204 * hist: | F0 | F1 | F2 | | F31 |
1205 * +----+----+----+ ... +-----+
1206 * mask: | b0 | b1 | b2 | | b31 |
1207 * +----+----+----+ ... +-----+
1208 *
1209 * F = count of threads at priority category n (frequency)
1210 * b = bitmask of non-zero priority categories (occupancy)
1211 *
1212 * / if H[n] != 0 : 1
1213 * b[n] = |
1214 * \ else : 0
1215 *
1216 *---------------------------------------------------------------------------
1217 * Basic priority inheritance protocol (PIP):
1218 *
1219 * Mn = mutex n, Tn = thread n
1220 *
1221 * A lower priority thread inherits the priority of the highest priority
1222 * thread blocked waiting for it to complete an action (such as release a
1223 * mutex or respond to a message via queue_send):
1224 *
1225 * 1) T2->M1->T1
1226 *
1227 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
1228 * priority than T1 then T1 inherits the priority of T2.
1229 *
1230 * 2) T3
1231 * \/
1232 * T2->M1->T1
1233 *
1234 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
1235 * T1 inherits the higher priority of T2 and T3.
1236 *
1237 * 3) T3->M2->T2->M1->T1
1238 *
1239 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
1240 * then T1 inherits the priority of T3 through T2.
1241 *
1242 * Blocking chains can grow arbitrarily complex (though it's best that they
1243 * not form at all very often :) and build-up from these units.
1244 *---------------------------------------------------------------------------
1245 */
1246
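To make the bookkeeping above concrete, here is a minimal self-contained model of the histogram-plus-bitmask idea, assuming 32 priority levels and that a lower number means a higher priority, as elsewhere in this file; the names prio_dist, prio_add, prio_sub and first_set are invented for the sketch. A thread's effective priority is then simply the first set bit of its own distribution.

#include <stdint.h>
#include <stdio.h>

#define NUM_PRIORITIES 32

struct prio_dist {
    unsigned int hist[NUM_PRIORITIES];  /* waiter/boost count per level */
    uint32_t mask;                      /* bit n set <=> hist[n] != 0   */
};

static void prio_add(struct prio_dist *pd, int prio)
{
    if (++pd->hist[prio] == 1)
        pd->mask |= 1u << prio;
}

static void prio_sub(struct prio_dist *pd, int prio)
{
    if (--pd->hist[prio] == 0)
        pd->mask &= ~(1u << prio);
}

/* Counterpart of find_first_set_bit: lowest set bit index = best priority. */
static int first_set(uint32_t mask)
{
    for (int i = 0; i < NUM_PRIORITIES; i++)
        if (mask & (1u << i))
            return i;
    return NUM_PRIORITIES;  /* empty distribution */
}

int main(void)
{
    struct prio_dist pd = { {0}, 0 };

    prio_add(&pd, 20);                   /* the thread's own base priority */
    printf("%d\n", first_set(pd.mask));  /* 20 */

    prio_add(&pd, 5);                    /* a priority-5 waiter boosts it  */
    printf("%d\n", first_set(pd.mask));  /* 5  */

    prio_sub(&pd, 5);                    /* waiter satisfied, boost removed */
    printf("%d\n", first_set(pd.mask));  /* back to 20 */
    return 0;
}
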
1247/*---------------------------------------------------------------------------
1248 * Increment frequency at category "priority"
1249 *---------------------------------------------------------------------------
1250 */
1251static inline unsigned int prio_add_entry(
1252 struct priority_distribution *pd, int priority)
1253{
1254 unsigned int count;
1255 /* Enough size/instruction count difference for ARM makes it worth it to
1256 * use different code (192 bytes for ARM). Only thing better is ASM. */
1257#ifdef CPU_ARM
1258 count = pd->hist[priority];
1259 if (++count == 1)
1260 pd->mask |= 1 << priority;
1261 pd->hist[priority] = count;
1262#else /* This one's better for Coldfire */
1263 if ((count = ++pd->hist[priority]) == 1)
1264 pd->mask |= 1 << priority;
1265#endif
1266
1267 return count;
1268}
1269
1270/*---------------------------------------------------------------------------
1271 * Decrement frequency at category "priority"
1272 *---------------------------------------------------------------------------
1273 */
1274static inline unsigned int prio_subtract_entry(
1275 struct priority_distribution *pd, int priority)
1276{
1277 unsigned int count;
1278
1279#ifdef CPU_ARM
1280 count = pd->hist[priority];
1281 if (--count == 0)
1282 pd->mask &= ~(1 << priority);
1283 pd->hist[priority] = count;
1284#else
1285 if ((count = --pd->hist[priority]) == 0)
1286 pd->mask &= ~(1 << priority);
1287#endif
1288
1289 return count;
1290}
1291
1292/*---------------------------------------------------------------------------
1293 * Remove from one category and add to another
1294 *---------------------------------------------------------------------------
1295 */
1296static inline void prio_move_entry(
1297 struct priority_distribution *pd, int from, int to)
1298{
1299 uint32_t mask = pd->mask;
1300
1301#ifdef CPU_ARM
1302 unsigned int count;
1303
1304 count = pd->hist[from];
1305 if (--count == 0)
1306 mask &= ~(1 << from);
1307 pd->hist[from] = count;
1308
1309 count = pd->hist[to];
1310 if (++count == 1)
1311 mask |= 1 << to;
1312 pd->hist[to] = count;
1313#else
1314 if (--pd->hist[from] == 0)
1315 mask &= ~(1 << from);
1316
1317 if (++pd->hist[to] == 1)
1318 mask |= 1 << to;
1319#endif
1320
1321 pd->mask = mask;
1322}
1323
1324/*---------------------------------------------------------------------------
1325 * Change the priority and rtr entry for a running thread
1326 *---------------------------------------------------------------------------
1327 */
1328static inline void set_running_thread_priority(
1329 struct thread_entry *thread, int priority)
1330{
1331 const unsigned int core = IF_COP_CORE(thread->core);
1332 RTR_LOCK(core);
1333 rtr_move_entry(core, thread->priority, priority);
1334 thread->priority = priority;
1335 RTR_UNLOCK(core);
1336}
1337
1338/*---------------------------------------------------------------------------
1339 * Finds the highest priority thread in a list of threads. If the list is
1340 * empty, PRIORITY_IDLE is returned.
1341 *
1342 * It is possible to use the struct priority_distribution within an object
1343 * instead of scanning the remaining threads in the list but as a compromise,
1344 * the resulting per-object memory overhead is saved at a slight speed
1345 * penalty under high contention.
1346 *---------------------------------------------------------------------------
1347 */
1348static int find_highest_priority_in_list_l(
1349 struct thread_entry * const thread)
1350{
1351 if (thread != NULL)
1240 { 1352 {
1241 /* Next item becomes list head */ 1353 /* Go through the list until ending up back at the initial thread */
1242 cores[IF_COP_CORE(thread->core)].timeout = next; 1354 int highest_priority = thread->priority;
1355 struct thread_entry *curr = thread;
1243 1356
1244 if (next != NULL) 1357 do
1245 { 1358 {
1246 /* Fix new list head's prev to point to itself. */ 1359 int priority = curr->priority;
1247 next->tmo.prev = next; 1360
1361 if (priority < highest_priority)
1362 highest_priority = priority;
1363
1364 curr = curr->l.next;
1248 } 1365 }
1366 while (curr != thread);
1249 1367
1250 thread->tmo.prev = NULL; 1368 return highest_priority;
1251 return;
1252 } 1369 }
1253 1370
1254 prev = thread->tmo.prev; 1371 return PRIORITY_IDLE;
1372}
1255 1373
1256 if (next != NULL) 1374/*---------------------------------------------------------------------------
1375 * Register priority with blocking system and bubble it down the chain if
1376 * any until we reach the end or something is already equal or higher.
1377 *
1378 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
1379 * targets but that same action also guarantees a circular block anyway and
1380 * those are prevented, right? :-)
1381 *---------------------------------------------------------------------------
1382 */
1383static struct thread_entry *
1384 blocker_inherit_priority(struct thread_entry *current)
1385{
1386 const int priority = current->priority;
1387 struct blocker *bl = current->blocker;
1388 struct thread_entry * const tstart = current;
1389 struct thread_entry *bl_t = bl->thread;
1390
1391 /* Blocker cannot change since the object protection is held */
1392 LOCK_THREAD(bl_t);
1393
1394 for (;;)
1257 { 1395 {
1258 next->tmo.prev = prev; 1396 struct thread_entry *next;
1397 int bl_pr = bl->priority;
1398
1399 if (priority >= bl_pr)
1400 break; /* Object priority already high enough */
1401
1402 bl->priority = priority;
1403
1404 /* Add this one */
1405 prio_add_entry(&bl_t->pdist, priority);
1406
1407 if (bl_pr < PRIORITY_IDLE)
1408 {
1409 /* Not first waiter - subtract old one */
1410 prio_subtract_entry(&bl_t->pdist, bl_pr);
1411 }
1412
1413 if (priority >= bl_t->priority)
1414 break; /* Thread priority high enough */
1415
1416 if (bl_t->state == STATE_RUNNING)
1417 {
1418 /* Blocking thread is a running thread therefore there are no
1419 * further blockers. Change the "run queue" on which it
1420 * resides. */
1421 set_running_thread_priority(bl_t, priority);
1422 break;
1423 }
1424
1425 bl_t->priority = priority;
1426
1427 /* If blocking thread has a blocker, apply transitive inheritance */
1428 bl = bl_t->blocker;
1429
1430 if (bl == NULL)
1431 break; /* End of chain or object doesn't support inheritance */
1432
1433 next = bl->thread;
1434
1435 if (next == tstart)
1436 break; /* Full-circle - deadlock! */
1437
1438 UNLOCK_THREAD(current);
1439
1440#if NUM_CORES > 1
1441 for (;;)
1442 {
1443 LOCK_THREAD(next);
1444
1445 /* Blocker could change - retest condition */
1446 if (bl->thread == next)
1447 break;
1448
1449 UNLOCK_THREAD(next);
1450 next = bl->thread;
1451 }
1452#endif
1453 current = bl_t;
1454 bl_t = next;
1259 } 1455 }
1260 1456
1261 prev->tmo.next = next; 1457 UNLOCK_THREAD(bl_t);
1262 thread->tmo.prev = NULL; 1458
1459 return current;
1263} 1460}
1264 1461
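As a worked example of chain case 3) handled by blocker_inherit_priority above (illustrative numbers and invented toy types, not code from this tree): T1 with base priority 20 owns M1, T2 with base 15 owns M2 and blocks on M1, then T3 with priority 5 blocks on M2; the boost propagates down the chain until everything runs at 5.

#include <stdio.h>

/* Toy model; lower number = higher priority. */
struct object;

struct task {
    int base;                   /* assigned priority */
    int effective;              /* priority after inheritance */
    struct object *waiting_on;  /* object this task is blocked on, if any */
};

struct object {
    struct task *owner;
    int best_waiter;            /* best priority among queued waiters */
};

static void block_on(struct task *t, struct object *o)
{
    int prio = t->effective;
    struct object *bl = o;

    t->waiting_on = o;

    /* Bubble the boost down the ownership chain until something is
     * already high enough - the same shape as blocker_inherit_priority. */
    for (;;) {
        if (prio >= bl->best_waiter)
            break;                        /* object already boosted enough */
        bl->best_waiter = prio;
        if (prio >= bl->owner->effective)
            break;                        /* owner already high enough */
        bl->owner->effective = prio;      /* owner inherits */
        bl = bl->owner->waiting_on;       /* transitive step */
        if (bl == NULL)
            break;                        /* end of chain */
    }
}

int main(void)
{
    struct task T1 = { 20, 20, NULL };    /* owns M1 */
    struct task T2 = { 15, 15, NULL };    /* owns M2 */
    struct task T3 = {  5,  5, NULL };
    struct object M1 = { &T1, 32 };       /* 32 plays the role of PRIORITY_IDLE */
    struct object M2 = { &T2, 32 };

    block_on(&T2, &M1);   /* T2->M1->T1: T1 now runs at 15 */
    block_on(&T3, &M2);   /* T3->M2->T2->M1->T1: T1 and T2 now run at 5 */

    printf("T1=%d T2=%d T3=%d\n", T1.effective, T2.effective, T3.effective);
    /* prints T1=5 T2=5 T3=5 */
    return 0;
}
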
1265/*--------------------------------------------------------------------------- 1462/*---------------------------------------------------------------------------
1266 * Schedules a thread wakeup on the specified core. Threads will be made 1463 * Readjust priorities when waking a thread blocked waiting for another
1267 * ready to run when the next task switch occurs. Note that this does not 1464 * in essence "releasing" the thread's effect on the object owner. Can be
1268 * introduce an on-core delay since the soonest the next thread may run is 1465 * performed from any context.
1269 * no sooner than that. Other cores and on-core interrupts may only ever
1270 * add to the list.
1271 *--------------------------------------------------------------------------- 1466 *---------------------------------------------------------------------------
1272 */ 1467 */
1273static void core_schedule_wakeup(struct thread_entry *thread) 1468struct thread_entry *
1469 wakeup_priority_protocol_release(struct thread_entry *thread)
1274{ 1470{
1275 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 1471 const int priority = thread->priority;
1276 const unsigned int core = IF_COP_CORE(thread->core); 1472 struct blocker *bl = thread->blocker;
1277 add_to_list_l_locked(&cores[core].waking, thread); 1473 struct thread_entry * const tstart = thread;
1474 struct thread_entry *bl_t = bl->thread;
1475
1476 /* Blocker cannot change since object will be locked */
1477 LOCK_THREAD(bl_t);
1478
1479 thread->blocker = NULL; /* Thread not blocked */
1480
1481 for (;;)
1482 {
1483 struct thread_entry *next;
1484 int bl_pr = bl->priority;
1485
1486 if (priority > bl_pr)
1487 break; /* Object priority higher */
1488
1489 next = *thread->bqp;
1490
1491 if (next == NULL)
1492 {
1493 /* No more threads in queue */
1494 prio_subtract_entry(&bl_t->pdist, bl_pr);
1495 bl->priority = PRIORITY_IDLE;
1496 }
1497 else
1498 {
1499 /* Check list for highest remaining priority */
1500 int queue_pr = find_highest_priority_in_list_l(next);
1501
1502 if (queue_pr == bl_pr)
1503 break; /* Object priority not changing */
1504
1505 /* Change queue priority */
1506 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
1507 bl->priority = queue_pr;
1508 }
1509
1510 if (bl_pr > bl_t->priority)
1511 break; /* thread priority is higher */
1512
1513 bl_pr = find_first_set_bit(bl_t->pdist.mask);
1514
1515 if (bl_pr == bl_t->priority)
1516 break; /* Thread priority not changing */
1517
1518 if (bl_t->state == STATE_RUNNING)
1519 {
1520 /* No further blockers */
1521 set_running_thread_priority(bl_t, bl_pr);
1522 break;
1523 }
1524
1525 bl_t->priority = bl_pr;
1526
1527 /* If blocking thread has a blocker, apply transitive inheritance */
1528 bl = bl_t->blocker;
1529
1530 if (bl == NULL)
1531 break; /* End of chain or object doesn't support inheritance */
1532
1533 next = bl->thread;
1534
1535 if (next == tstart)
1536 break; /* Full-circle - deadlock! */
1537
1538 UNLOCK_THREAD(thread);
1539
1278#if NUM_CORES > 1 1540#if NUM_CORES > 1
1279 if (core != CURRENT_CORE) 1541 for (;;)
1542 {
1543 LOCK_THREAD(next);
1544
1545 /* Blocker could change - retest condition */
1546 if (bl->thread == next)
1547 break;
1548
1549 UNLOCK_THREAD(next);
1550 next = bl->thread;
1551 }
1552#endif
1553 thread = bl_t;
1554 bl_t = next;
1555 }
1556
1557 UNLOCK_THREAD(bl_t);
1558
1559#if NUM_CORES > 1
1560 if (thread != tstart)
1280 { 1561 {
1281 core_wake(core); 1562 /* Relock original if it changed */
1563 LOCK_THREAD(tstart);
1282 } 1564 }
1283#endif 1565#endif
1284 set_irq_level(oldlevel); 1566
1567 return cores[CURRENT_CORE].running;
1285} 1568}
1286 1569
1287/*--------------------------------------------------------------------------- 1570/*---------------------------------------------------------------------------
1288 * If the waking list was populated, move all threads on it onto the running 1571 * Transfer ownership to a thread waiting for an object and transfer
1289 * list so they may be run ASAP. 1572 * inherited priority boost from other waiters. This algorithm knows that
1573 * blocking chains may only unblock from the very end.
1574 *
1575 * Only the owning thread itself may call this and so the assumption that
1576 * it is the running thread is made.
1290 *--------------------------------------------------------------------------- 1577 *---------------------------------------------------------------------------
1291 */ 1578 */
1292static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core)) 1579struct thread_entry *
1580 wakeup_priority_protocol_transfer(struct thread_entry *thread)
1293{ 1581{
1294 struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking); 1582 /* Waking thread inherits priority boost from object owner */
1295 struct thread_entry *r = cores[IF_COP_CORE(core)].running; 1583 struct blocker *bl = thread->blocker;
1584 struct thread_entry *bl_t = bl->thread;
1585 struct thread_entry *next;
1586 int bl_pr;
1296 1587
1297 /* Tranfer all threads on waking list to running list in one 1588 THREAD_ASSERT(thread_get_current() == bl_t,
1298 swoop */ 1589 "UPPT->wrong thread", thread_get_current());
1299 if (r != NULL) 1590
1591 LOCK_THREAD(bl_t);
1592
1593 bl_pr = bl->priority;
1594
1595 /* Remove the object's boost from the owning thread */
1596 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
1597 bl_pr <= bl_t->priority)
1300 { 1598 {
1301 /* Place waking threads at the end of the running list. */ 1599 /* No more threads at this priority are waiting and the old level is
1302 struct thread_entry *tmp; 1600 * at least the thread level */
1303 w->l.prev->l.next = r; 1601 int priority = find_first_set_bit(bl_t->pdist.mask);
1304 r->l.prev->l.next = w; 1602
1305 tmp = r->l.prev; 1603 if (priority != bl_t->priority)
1306 r->l.prev = w->l.prev; 1604 {
1307 w->l.prev = tmp; 1605 /* Adjust this thread's priority */
1606 set_running_thread_priority(bl_t, priority);
1607 }
1608 }
1609
1610 next = *thread->bqp;
1611
1612 if (next == NULL)
1613 {
1614 /* Expected shortcut - no more waiters */
1615 bl_pr = PRIORITY_IDLE;
1308 } 1616 }
1309 else 1617 else
1310 { 1618 {
1311 /* Just transfer the list as-is */ 1619 if (thread->priority <= bl_pr)
1312 cores[IF_COP_CORE(core)].running = w; 1620 {
1621 /* Need to scan threads remaining in queue */
1622 bl_pr = find_highest_priority_in_list_l(next);
1623 }
1624
1625 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
1626 bl_pr < thread->priority)
1627 {
1628 /* Thread priority must be raised */
1629 thread->priority = bl_pr;
1630 }
1631 }
1632
1633 bl->thread = thread; /* This thread pwns */
1634 bl->priority = bl_pr; /* Save highest blocked priority */
1635 thread->blocker = NULL; /* Thread not blocked */
1636
1637 UNLOCK_THREAD(bl_t);
1638
1639 return bl_t;
1640}
1641
1642/*---------------------------------------------------------------------------
1643 * No threads must be blocked waiting for this thread except for it to exit.
1644 * The alternative is more elaborate cleanup and object registration code.
1645 * Check this for risk of silent data corruption when objects with
1646 * inheritable blocking are abandoned by the owner - not precise but may
1647 * catch something.
1648 *---------------------------------------------------------------------------
1649 */
1650void check_for_obj_waiters(const char *function, struct thread_entry *thread)
1651{
1651{ 1652 /* Only one bit in the mask should be set with a frequency of 1, which
1653 * represents the thread's own base priority */
1654 uint32_t mask = thread->pdist.mask;
1655 if ((mask & (mask - 1)) != 0 ||
1656 thread->pdist.hist[find_first_set_bit(mask)] > 1)
1657 {
1658 unsigned char name[32];
1659 thread_get_name(name, 32, thread);
1660 panicf("%s->%s with obj. waiters", function, name);
1313 } 1661 }
1314 /* Just leave any timeout threads on the timeout list. If a timeout check 1662}
1315 * is due, they will be removed there. If they do a timeout again before 1663#endif /* HAVE_PRIORITY_SCHEDULING */
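The test above relies on the classic x & (x - 1) trick, shown standalone below: clearing the lowest set bit yields zero exactly when at most one bit was set, so a thread holding no inherited boosts has a single-bit mask with a count of 1 at its own base priority.

#include <assert.h>
#include <stdint.h>

/* True if at most one bit is set: x & (x - 1) clears the lowest set bit. */
static int at_most_one_bit(uint32_t x)
{
    return (x & (x - 1)) == 0;
}

int main(void)
{
    assert(at_most_one_bit(0x00000000));   /* no bits set               */
    assert(at_most_one_bit(0x00000400));   /* only priority 10 set      */
    assert(!at_most_one_bit(0x00000401));  /* priorities 0 and 10 set:
                                              some waiter still boosts us */
    return 0;
}
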
1316 * being removed, they will just stay on the list with a new expiration 1664
1317 * tick. */ 1665/*---------------------------------------------------------------------------
1666 * Move a thread back to a running state on its core.
1667 *---------------------------------------------------------------------------
1668 */
1669static void core_schedule_wakeup(struct thread_entry *thread)
1670{
1671 const unsigned int core = IF_COP_CORE(thread->core);
1672
1673 RTR_LOCK(core);
1674
1675 thread->state = STATE_RUNNING;
1676
1677 add_to_list_l(&cores[core].running, thread);
1678 rtr_add_entry(core, thread->priority);
1679
1680 RTR_UNLOCK(core);
1318 1681
1319 /* Waking list is clear - NULL and unlock it */ 1682#if NUM_CORES > 1
1320 UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL); 1683 if (core != CURRENT_CORE)
1684 core_wake(core);
1685#endif
1321} 1686}
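One invariant worth spelling out, sketched with stand-in types below (toy_core and its helpers are invented): every thread on a core's running list is counted exactly once in that core's rtr distribution, which is why the list add/remove and the prio_add_entry/prio_subtract_entry calls always travel together under the RTR lock in this file.

#include <assert.h>
#include <stdint.h>

struct rtr_dist { unsigned hist[32]; uint32_t mask; };
struct toy_core {
    int nr_running;          /* stands in for the circular running list */
    struct rtr_dist rtr;     /* one entry per runnable thread           */
};

static void core_add_runnable(struct toy_core *c, int prio)
{
    c->nr_running++;
    if (++c->rtr.hist[prio] == 1)
        c->rtr.mask |= 1u << prio;
}

static void core_remove_runnable(struct toy_core *c, int prio)
{
    c->nr_running--;
    if (--c->rtr.hist[prio] == 0)
        c->rtr.mask &= ~(1u << prio);
}

int main(void)
{
    struct toy_core core = { 0, { {0}, 0 } };

    core_add_runnable(&core, 10);     /* core_schedule_wakeup-like path */
    core_add_runnable(&core, 10);
    core_remove_runnable(&core, 10);  /* block_thread_on_l-like path    */

    /* The distribution and the list must always agree. */
    unsigned total = 0;
    for (int i = 0; i < 32; i++)
        total += core.rtr.hist[i];
    assert(total == (unsigned)core.nr_running);
    return 0;
}
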
1322 1687
1323/*--------------------------------------------------------------------------- 1688/*---------------------------------------------------------------------------
@@ -1326,7 +1691,7 @@ static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
1326 * tick when the next check will occur. 1691 * tick when the next check will occur.
1327 *--------------------------------------------------------------------------- 1692 *---------------------------------------------------------------------------
1328 */ 1693 */
1329static void check_tmo_threads(void) 1694void check_tmo_threads(void)
1330{ 1695{
1331 const unsigned int core = CURRENT_CORE; 1696 const unsigned int core = CURRENT_CORE;
1332 const long tick = current_tick; /* snapshot the current tick */ 1697 const long tick = current_tick; /* snapshot the current tick */
@@ -1335,54 +1700,98 @@ static void check_tmo_threads(void)
1335 1700
1336 /* If there are no processes waiting for a timeout, just keep the check 1701 /* If there are no processes waiting for a timeout, just keep the check
1337 tick from falling into the past. */ 1702 tick from falling into the past. */
1338 if (next != NULL) 1703
1704 /* Break the loop once we have walked through the list of all
1705 * sleeping processes or have removed them all. */
1706 while (next != NULL)
1339 { 1707 {
1340 /* Check sleeping threads. */ 1708 /* Check sleeping threads. Allow interrupts between checks. */
1341 do 1709 set_irq_level(0);
1342 {
1343 /* Must make sure noone else is examining the state, wait until
1344 slot is no longer busy */
1345 struct thread_entry *curr = next;
1346 next = curr->tmo.next;
1347 1710
1348 unsigned state = GET_THREAD_STATE(curr); 1711 struct thread_entry *curr = next;
1349 1712
1350 if (state < TIMEOUT_STATE_FIRST) 1713 next = curr->tmo.next;
1351 { 1714
1352 /* Cleanup threads no longer on a timeout but still on the 1715 /* Lock thread slot against explicit wakeup */
1353 * list. */ 1716 set_irq_level(HIGHEST_IRQ_LEVEL);
1354 remove_from_list_tmo(curr); 1717 LOCK_THREAD(curr);
1355 UNLOCK_THREAD(curr, state); /* Unlock thread slot */ 1718
1356 } 1719 unsigned state = curr->state;
1357 else if (TIME_BEFORE(tick, curr->tmo_tick)) 1720
1721 if (state < TIMEOUT_STATE_FIRST)
1722 {
1723 /* Cleanup threads no longer on a timeout but still on the
1724 * list. */
1725 remove_from_list_tmo(curr);
1726 }
1727 else if (TIME_BEFORE(tick, curr->tmo_tick))
1728 {
1729 /* Timeout still pending - this will be the usual case */
1730 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
1358 { 1731 {
1359 /* Timeout still pending - this will be the usual case */ 1732 /* Earliest timeout found so far - move the next check up
1360 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check)) 1733 to its time */
1361 { 1734 next_tmo_check = curr->tmo_tick;
1362 /* Earliest timeout found so far - move the next check up
1363 to its time */
1364 next_tmo_check = curr->tmo_tick;
1365 }
1366 UNLOCK_THREAD(curr, state); /* Unlock thread slot */
1367 } 1735 }
1368 else 1736 }
1737 else
1738 {
1739 /* Sleep timeout has been reached so bring the thread back to
1740 * life again. */
1741 if (state == STATE_BLOCKED_W_TMO)
1369 { 1742 {
1370 /* Sleep timeout has been reached so bring the thread back to 1743#if NUM_CORES > 1
1371 * life again. */ 1744 /* Lock the waiting thread's kernel object */
1372 if (state == STATE_BLOCKED_W_TMO) 1745 struct corelock *ocl = curr->obj_cl;
1746
1747 if (corelock_try_lock(ocl) == 0)
1373 { 1748 {
1374 remove_from_list_l_locked(curr->bqp, curr); 1749 /* Need to retry in the correct order though the need is
1750 * unlikely */
1751 UNLOCK_THREAD(curr);
1752 corelock_lock(ocl);
1753 LOCK_THREAD(curr);
1754
1755 if (curr->state != STATE_BLOCKED_W_TMO)
1756 {
1757 /* Thread was woken or removed explicitly while slot
1758 * was unlocked */
1759 corelock_unlock(ocl);
1760 remove_from_list_tmo(curr);
1761 UNLOCK_THREAD(curr);
1762 continue;
1763 }
1375 } 1764 }
1765#endif /* NUM_CORES */
1766
1767 remove_from_list_l(curr->bqp, curr);
1768
1769#ifdef HAVE_WAKEUP_EXT_CB
1770 if (curr->wakeup_ext_cb != NULL)
1771 curr->wakeup_ext_cb(curr);
1772#endif
1376 1773
1377 remove_from_list_tmo(curr); 1774#ifdef HAVE_PRIORITY_SCHEDULING
1378 add_to_list_l(&cores[core].running, curr); 1775 if (curr->blocker != NULL)
1379 UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING); 1776 wakeup_priority_protocol_release(curr);
1777#endif
1778 corelock_unlock(ocl);
1380 } 1779 }
1780 /* else state == STATE_SLEEPING */
1781
1782 remove_from_list_tmo(curr);
1783
1784 RTR_LOCK(core);
1381 1785
1382 /* Break the loop once we have walked through the list of all 1786 curr->state = STATE_RUNNING;
1383 * sleeping processes or have removed them all. */ 1787
1788 add_to_list_l(&cores[core].running, curr);
1789 rtr_add_entry(core, curr->priority);
1790
1791 RTR_UNLOCK(core);
1384 } 1792 }
1385 while (next != NULL); 1793
1794 UNLOCK_THREAD(curr);
1386 } 1795 }
1387 1796
1388 cores[core].next_tmo_check = next_tmo_check; 1797 cores[core].next_tmo_check = next_tmo_check;
@@ -1390,109 +1799,33 @@ static void check_tmo_threads(void)
1390 1799
1391/*--------------------------------------------------------------------------- 1800/*---------------------------------------------------------------------------
1392 * Performs operations that must be done before blocking a thread but after 1801 * Performs operations that must be done before blocking a thread but after
1393 * the state is saved - follows reverse of locking order. blk_ops.flags is 1802 * the state is saved.
1394 * assumed to be nonzero.
1395 *--------------------------------------------------------------------------- 1803 *---------------------------------------------------------------------------
1396 */ 1804 */
1397#if NUM_CORES > 1 1805#if NUM_CORES > 1
1398static inline void run_blocking_ops( 1806static inline void run_blocking_ops(
1399 unsigned int core, struct thread_entry *thread) 1807 unsigned int core, struct thread_entry *thread)
1400{ 1808{
1401 struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops; 1809 struct thread_blk_ops *ops = &cores[core].blk_ops;
1402 const unsigned flags = ops->flags; 1810 const unsigned flags = ops->flags;
1403 1811
1404 if (flags == 0) 1812 if (flags == TBOP_CLEAR)
1405 return; 1813 return;
1406 1814
1407 if (flags & TBOP_SWITCH_CORE) 1815 switch (flags)
1408 { 1816 {
1817 case TBOP_SWITCH_CORE:
1409 core_switch_blk_op(core, thread); 1818 core_switch_blk_op(core, thread);
1410 } 1819 /* Fall-through */
1411 1820 case TBOP_UNLOCK_CORELOCK:
1412#if CONFIG_CORELOCK == SW_CORELOCK
1413 if (flags & TBOP_UNLOCK_LIST)
1414 {
1415 UNLOCK_LIST(ops->list_p, NULL);
1416 }
1417
1418 if (flags & TBOP_UNLOCK_CORELOCK)
1419 {
1420 corelock_unlock(ops->cl_p); 1821 corelock_unlock(ops->cl_p);
1421 }
1422
1423 if (flags & TBOP_UNLOCK_THREAD)
1424 {
1425 UNLOCK_THREAD(ops->thread, 0);
1426 }
1427#elif CONFIG_CORELOCK == CORELOCK_SWAP
1428 /* Write updated variable value into memory location */
1429 switch (flags & TBOP_VAR_TYPE_MASK)
1430 {
1431 case TBOP_UNLOCK_LIST:
1432 UNLOCK_LIST(ops->list_p, ops->list_v);
1433 break;
1434 case TBOP_SET_VARi:
1435 *ops->var_ip = ops->var_iv;
1436 break;
1437 case TBOP_SET_VARu8:
1438 *ops->var_u8p = ops->var_u8v;
1439 break; 1822 break;
1440 } 1823 }
1441#endif /* CONFIG_CORELOCK == */
1442 1824
1443 /* Unlock thread's slot */ 1825 ops->flags = TBOP_CLEAR;
1444 if (flags & TBOP_UNLOCK_CURRENT)
1445 {
1446 UNLOCK_THREAD(thread, ops->state);
1447 }
1448
1449 ops->flags = 0;
1450} 1826}
1451#endif /* NUM_CORES > 1 */ 1827#endif /* NUM_CORES > 1 */
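The intent behind the blk_ops machinery can be shown with a reduced standalone sketch (hypothetical names, far simpler than the real corelock handling): the blocking path records one pending operation while it still holds the lock, and the switch path replays it only after the outgoing context has been saved.

#include <stdio.h>

/* Reduced model of struct thread_blk_ops: one deferred action per core. */
enum blk_op { OP_CLEAR = 0, OP_UNLOCK_CORELOCK };

struct toy_lock { int held; };

struct deferred {
    enum blk_op op;
    struct toy_lock *lock;
};

static void defer_unlock(struct deferred *d, struct toy_lock *l)
{
    d->op = OP_UNLOCK_CORELOCK;   /* remember it, don't do it yet */
    d->lock = l;
}

/* Called from the task switch path after the old context is saved. */
static void run_deferred(struct deferred *d)
{
    switch (d->op) {
    case OP_UNLOCK_CORELOCK:
        d->lock->held = 0;
        break;
    case OP_CLEAR:
        break;
    }
    d->op = OP_CLEAR;
}

int main(void)
{
    struct toy_lock obj_lock = { 1 };       /* held while queuing ourselves */
    struct deferred pending = { OP_CLEAR, NULL };

    defer_unlock(&pending, &obj_lock);      /* block-side setup */
    /* ... context would be saved here ... */
    run_deferred(&pending);                 /* run_blocking_ops-style replay */

    printf("lock held after switch: %d\n", obj_lock.held);  /* 0 */
    return 0;
}
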
1452 1828
1453
1454/*---------------------------------------------------------------------------
1455 * Runs any operations that may cause threads to be ready to run and then
1456 * sleeps the processor core until the next interrupt if none are.
1457 *---------------------------------------------------------------------------
1458 */
1459static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
1460{
1461 for (;;)
1462 {
1463 set_irq_level(HIGHEST_IRQ_LEVEL);
1464 /* We want to do these ASAP as it may change the decision to sleep
1465 * the core or a core has woken because an interrupt occurred
1466 * and posted a message to a queue. */
1467 if (cores[IF_COP_CORE(core)].waking.queue != NULL)
1468 {
1469 core_perform_wakeup(IF_COP(core));
1470 }
1471
1472 /* If there are threads on a timeout and the earliest wakeup is due,
1473 * check the list and wake any threads that need to start running
1474 * again. */
1475 if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
1476 {
1477 check_tmo_threads();
1478 }
1479
1480 /* If there is a ready to run task, return its ID and keep core
1481 * awake. */
1482 if (cores[IF_COP_CORE(core)].running == NULL)
1483 {
1484 /* Enter sleep mode to reduce power usage - woken up on interrupt
1485 * or wakeup request from another core - expected to enable all
1486 * interrupts. */
1487 core_sleep(IF_COP(core));
1488 continue;
1489 }
1490
1491 set_irq_level(0);
1492 return cores[IF_COP_CORE(core)].running;
1493 }
1494}
1495
1496#ifdef RB_PROFILE 1829#ifdef RB_PROFILE
1497void profile_thread(void) 1830void profile_thread(void)
1498{ 1831{
@@ -1502,55 +1835,34 @@ void profile_thread(void)
1502 1835
1503/*--------------------------------------------------------------------------- 1836/*---------------------------------------------------------------------------
1504 * Prepares a thread to block on an object's list and/or for a specified 1837 * Prepares a thread to block on an object's list and/or for a specified
1505 * duration - expects object and slot to be appropriately locked if needed. 1838 * duration - expects object and slot to be appropriately locked if needed
1839 * and interrupts to be masked.
1506 *--------------------------------------------------------------------------- 1840 *---------------------------------------------------------------------------
1507 */ 1841 */
1508static inline void _block_thread_on_l(struct thread_queue *list, 1842static inline void block_thread_on_l(struct thread_entry *thread,
1509 struct thread_entry *thread, 1843 unsigned state)
1510 unsigned state
1511 IF_SWCL(, const bool nolock))
1512{ 1844{
1513 /* If inlined, unreachable branches will be pruned with no size penalty 1845 /* If inlined, unreachable branches will be pruned with no size penalty
1514 because constant params are used for state and nolock. */ 1846 because state is passed as a constant parameter. */
1515 const unsigned int core = IF_COP_CORE(thread->core); 1847 const unsigned int core = IF_COP_CORE(thread->core);
1516 1848
1517 /* Remove the thread from the list of running threads. */ 1849 /* Remove the thread from the list of running threads. */
1850 RTR_LOCK(core);
1518 remove_from_list_l(&cores[core].running, thread); 1851 remove_from_list_l(&cores[core].running, thread);
1852 rtr_subtract_entry(core, thread->priority);
1853 RTR_UNLOCK(core);
1519 1854
1520 /* Add a timeout to the block if not infinite */ 1855 /* Add a timeout to the block if not infinite */
1521 switch (state) 1856 switch (state)
1522 { 1857 {
1523 case STATE_BLOCKED: 1858 case STATE_BLOCKED:
1524 /* Put the thread into a new list of inactive threads. */
1525#if CONFIG_CORELOCK == SW_CORELOCK
1526 if (nolock)
1527 {
1528 thread->bqp = NULL; /* Indicate nolock list */
1529 thread->bqnlp = (struct thread_entry **)list;
1530 add_to_list_l((struct thread_entry **)list, thread);
1531 }
1532 else
1533#endif
1534 {
1535 thread->bqp = list;
1536 add_to_list_l_locked(list, thread);
1537 }
1538 break;
1539 case STATE_BLOCKED_W_TMO: 1859 case STATE_BLOCKED_W_TMO:
1540 /* Put the thread into a new list of inactive threads. */ 1860 /* Put the thread into a new list of inactive threads. */
1541#if CONFIG_CORELOCK == SW_CORELOCK 1861 add_to_list_l(thread->bqp, thread);
1542 if (nolock) 1862
1543 { 1863 if (state == STATE_BLOCKED)
1544 thread->bqp = NULL; /* Indicate nolock list */ 1864 break;
1545 thread->bqnlp = (struct thread_entry **)list; 1865
1546 add_to_list_l((struct thread_entry **)list, thread);
1547 }
1548 else
1549#endif
1550 {
1551 thread->bqp = list;
1552 add_to_list_l_locked(list, thread);
1553 }
1554 /* Fall-through */ 1866 /* Fall-through */
1555 case STATE_SLEEPING: 1867 case STATE_SLEEPING:
1556 /* If this thread times out sooner than any other thread, update 1868 /* If this thread times out sooner than any other thread, update
@@ -1568,35 +1880,11 @@ static inline void _block_thread_on_l(struct thread_queue *list,
1568 break; 1880 break;
1569 } 1881 }
1570 1882
1571#ifdef HAVE_PRIORITY_SCHEDULING 1883 /* Remember the next thread about to block. */
1572 /* Reset priorities */ 1884 cores[core].block_task = thread;
1573 if (thread->priority == cores[core].highest_priority)
1574 cores[core].highest_priority = LOWEST_PRIORITY;
1575#endif
1576 1885
1577#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK 1886 /* Report new state. */
1578 /* Safe to set state now */
1579 thread->state = state; 1887 thread->state = state;
1580#elif CONFIG_CORELOCK == CORELOCK_SWAP
1581 cores[core].blk_ops.state = state;
1582#endif
1583
1584#if NUM_CORES > 1
1585 /* Delay slot unlock until task switch */
1586 cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
1587#endif
1588}
1589
1590static inline void block_thread_on_l(
1591 struct thread_queue *list, struct thread_entry *thread, unsigned state)
1592{
1593 _block_thread_on_l(list, thread, state IF_SWCL(, false));
1594}
1595
1596static inline void block_thread_on_l_no_listlock(
1597 struct thread_entry **list, struct thread_entry *thread, unsigned state)
1598{
1599 _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
1600} 1888}
1601 1889
1602/*--------------------------------------------------------------------------- 1890/*---------------------------------------------------------------------------
@@ -1607,72 +1895,134 @@ static inline void block_thread_on_l_no_listlock(
1607 * INTERNAL: Intended for use by kernel and not for programs. 1895 * INTERNAL: Intended for use by kernel and not for programs.
1608 *--------------------------------------------------------------------------- 1896 *---------------------------------------------------------------------------
1609 */ 1897 */
1610void switch_thread(struct thread_entry *old) 1898void switch_thread(void)
1611{ 1899{
1612 const unsigned int core = CURRENT_CORE; 1900 const unsigned int core = CURRENT_CORE;
1901 struct thread_entry *block = cores[core].block_task;
1613 struct thread_entry *thread = cores[core].running; 1902 struct thread_entry *thread = cores[core].running;
1614 struct thread_entry *block = old;
1615 1903
1616 if (block == NULL) 1904 /* Get context to save - next thread to run is unknown until all wakeups
1617 old = thread; 1905 * are evaluated */
1906 if (block != NULL)
1907 {
1908 cores[core].block_task = NULL;
1909
1910#if NUM_CORES > 1
1911 if (thread == block)
1912 {
1913 /* This was the last thread running and another core woke us before
1914 * reaching here. Force next thread selection to give tmo threads or
1915 * other threads woken before this block a first chance. */
1916 block = NULL;
1917 }
1918 else
1919#endif
1920 {
1921 /* Blocking task is the old one */
1922 thread = block;
1923 }
1924 }
1618 1925
1619#ifdef RB_PROFILE 1926#ifdef RB_PROFILE
1620 profile_thread_stopped(old - threads); 1927 profile_thread_stopped(thread - threads);
1621#endif 1928#endif
1622 1929
1623 /* Begin task switching by saving our current context so that we can 1930 /* Begin task switching by saving our current context so that we can
1624 * restore the state of the current thread later to the point prior 1931 * restore the state of the current thread later to the point prior
1625 * to this call. */ 1932 * to this call. */
1626 store_context(&old->context); 1933 store_context(&thread->context);
1627 1934
1628 /* Check if the current thread stack is overflown */ 1935 /* Check if the current thread stack is overflown */
1629 if(((unsigned int *)old->stack)[0] != DEADBEEF) 1936 if (thread->stack[0] != DEADBEEF)
1630 thread_stkov(old); 1937 thread_stkov(thread);
1631 1938
1632#if NUM_CORES > 1 1939#if NUM_CORES > 1
1633 /* Run any blocking operations requested before switching/sleeping */ 1940 /* Run any blocking operations requested before switching/sleeping */
1634 run_blocking_ops(core, old); 1941 run_blocking_ops(core, thread);
1635#endif 1942#endif
1636 1943
1637 /* Go through the list of sleeping task to check if we need to wake up
1638 * any of them due to timeout. Also puts core into sleep state until
1639 * there is at least one running process again. */
1640 thread = sleep_core(IF_COP(core));
1641
1642#ifdef HAVE_PRIORITY_SCHEDULING 1944#ifdef HAVE_PRIORITY_SCHEDULING
1643 /* Select the new task based on priorities and the last time a process 1945 /* Reset the value of thread's skip count */
1644 * got CPU time. */ 1946 thread->skip_count = 0;
1645 if (block == NULL) 1947#endif
1646 thread = thread->l.next;
1647 1948
1648 for (;;) 1949 for (;;)
1649 { 1950 {
1650 int priority = thread->priority; 1951 /* If there are threads on a timeout and the earliest wakeup is due,
1952 * check the list and wake any threads that need to start running
1953 * again. */
1954 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1955 {
1956 check_tmo_threads();
1957 }
1958
1959 set_irq_level(HIGHEST_IRQ_LEVEL);
1960 RTR_LOCK(core);
1651 1961
1652 if (priority < cores[core].highest_priority) 1962 thread = cores[core].running;
1653 cores[core].highest_priority = priority;
1654 1963
1655 if (priority == cores[core].highest_priority || 1964 if (thread == NULL)
1656 thread->priority_x < cores[core].highest_priority ||
1657 (current_tick - thread->last_run > priority * 8))
1658 { 1965 {
1659 cores[core].running = thread; 1966 /* Enter sleep mode to reduce power usage - woken up on interrupt
1660 break; 1967 * or wakeup request from another core - expected to enable
1968 * interrupts. */
1969 RTR_UNLOCK(core);
1970 core_sleep(IF_COP(core));
1661 } 1971 }
1972 else
1973 {
1974#ifdef HAVE_PRIORITY_SCHEDULING
1975 /* Select the new task based on priorities and the last time a
1976 * process got CPU time relative to the highest priority runnable
1977 * task. */
1978 struct priority_distribution *pd = &cores[core].rtr;
1979 int max = find_first_set_bit(pd->mask);
1662 1980
1663 thread = thread->l.next; 1981 if (block == NULL)
1664 } 1982 {
1665 1983 /* Not switching on a block, tentatively select next thread */
1666 /* Reset the value of thread's last running time to the current time. */ 1984 thread = thread->l.next;
1667 thread->last_run = current_tick; 1985 }
1986
1987 for (;;)
1988 {
1989 int priority = thread->priority;
1990 int diff;
1991
1992 /* This ridiculously simple method of aging seems to work
1993 * suspiciously well. It does tend to reward CPU hogs (under
1994 * yielding) but that's generally not desirable at all. On the
1995 * plus side, relative to other threads, it penalizes excess
1996 * yielding which is good if some high priority thread is
1997 * performing no useful work such as polling for a device to be
1998 * ready. Of course, aging is only employed when higher and lower
1999 * priority threads are runnable. The highest priority runnable
2000 * thread(s) are never skipped. */
2001 if (priority <= max ||
2002 (diff = priority - max, ++thread->skip_count > diff*diff))
2003 {
2004 cores[core].running = thread;
2005 break;
2006 }
2007
2008 thread = thread->l.next;
2009 }
1668#else 2010#else
1669 if (block == NULL) 2011 /* Without priority use a simple FCFS algorithm */
1670 { 2012 if (block == NULL)
1671 thread = thread->l.next; 2013 {
1672 cores[core].running = thread; 2014 /* Not switching on a block, select next thread */
1673 } 2015 thread = thread->l.next;
2016 cores[core].running = thread;
2017 }
1674#endif /* HAVE_PRIORITY_SCHEDULING */ 2018#endif /* HAVE_PRIORITY_SCHEDULING */
1675 2019
2020 RTR_UNLOCK(core);
2021 set_irq_level(0);
2022 break;
2023 }
2024 }
2025
1676 /* And finally give control to the next thread. */ 2026 /* And finally give control to the next thread. */
1677 load_context(&thread->context); 2027 load_context(&thread->context);
1678 2028
@@ -1682,314 +2032,210 @@ void switch_thread(struct thread_entry *old)
1682} 2032}
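A quick numeric check of the aging rule in the selection loop above: with diff = priority - max, a thread is skipped until its skip_count exceeds diff*diff, so a thread two levels below the best runnable priority is passed over four times before being taken. The sketch below only evaluates that condition; the real scheduler also resets skip_count in switch_thread and on wakeup.

#include <stdio.h>

/* Mirror of the selection test: lower number = higher priority. */
static int should_run(int priority, int max, int *skip_count)
{
    int diff;
    return priority <= max ||
           (diff = priority - max, ++*skip_count > diff * diff);
}

int main(void)
{
    int max = 10;          /* best runnable priority on this core */
    int priority = 12;     /* our thread is two levels worse      */
    int skip_count = 0;

    for (int pass = 1; pass <= 6; pass++)
        printf("pass %d: %s\n", pass,
               should_run(priority, max, &skip_count) ? "selected" : "skipped");
    /* passes 1-4 are skipped, pass 5 selects (skip_count 5 > 4) */
    return 0;
}
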
1683 2033
1684/*--------------------------------------------------------------------------- 2034/*---------------------------------------------------------------------------
1685 * Change the boost state of a thread boosting or unboosting the CPU 2035 * Sleeps a thread for at least a specified number of ticks with zero being
1686 * as required. Require thread slot to be locked first. 2036 * a wait until the next tick.
1687 *---------------------------------------------------------------------------
1688 */
1689static inline void boost_thread(struct thread_entry *thread, bool boost)
1690{
1691#ifdef HAVE_SCHEDULER_BOOSTCTRL
1692 if ((thread->boosted != 0) != boost)
1693 {
1694 thread->boosted = boost;
1695 cpu_boost(boost);
1696 }
1697#endif
1698 (void)thread; (void)boost;
1699}
1700
1701/*---------------------------------------------------------------------------
1702 * Sleeps a thread for a specified number of ticks and unboost the thread if
1703 * if it is boosted. If ticks is zero, it does not delay but instead switches
1704 * tasks.
1705 * 2037 *
1706 * INTERNAL: Intended for use by kernel and not for programs. 2038 * INTERNAL: Intended for use by kernel and not for programs.
1707 *--------------------------------------------------------------------------- 2039 *---------------------------------------------------------------------------
1708 */ 2040 */
1709void sleep_thread(int ticks) 2041void sleep_thread(int ticks)
1710{ 2042{
1711 /* Get the entry for the current running thread. */
1712 struct thread_entry *current = cores[CURRENT_CORE].running; 2043 struct thread_entry *current = cores[CURRENT_CORE].running;
1713 2044
1714#if NUM_CORES > 1 2045 LOCK_THREAD(current);
1715 /* Lock thread slot */
1716 GET_THREAD_STATE(current);
1717#endif
1718 2046
1719 /* Set our timeout, change lists, and finally switch threads. 2047 /* Set our timeout, remove from run list and join timeout list. */
1720 * Unlock during switch on mulicore. */
1721 current->tmo_tick = current_tick + ticks + 1; 2048 current->tmo_tick = current_tick + ticks + 1;
1722 block_thread_on_l(NULL, current, STATE_SLEEPING); 2049 block_thread_on_l(current, STATE_SLEEPING);
1723 switch_thread(current);
1724 2050
1725 /* Our status should be STATE_RUNNING */ 2051 UNLOCK_THREAD(current);
1726 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1727 "S:R->!*R", current);
1728} 2052}
1729 2053
1730/*--------------------------------------------------------------------------- 2054/*---------------------------------------------------------------------------
1731 * Indefinitely block a thread on a blocking queue for explicit wakeup. 2055 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1732 * Caller with interrupt-accessible lists should disable interrupts first
1733 * and request a BOP_IRQ_LEVEL blocking operation to reset it.
1734 * 2056 *
1735 * INTERNAL: Intended for use by kernel objects and not for programs. 2057 * INTERNAL: Intended for use by kernel objects and not for programs.
1736 *--------------------------------------------------------------------------- 2058 *---------------------------------------------------------------------------
1737 */ 2059 */
1738IF_SWCL(static inline) void _block_thread(struct thread_queue *list 2060void block_thread(struct thread_entry *current)
1739 IF_SWCL(, const bool nolock))
1740{ 2061{
1741 /* Get the entry for the current running thread. */ 2062 /* Set the state to blocked and take us off of the run queue until we
1742 struct thread_entry *current = cores[CURRENT_CORE].running; 2063 * are explicitly woken */
1743 2064 LOCK_THREAD(current);
1744 /* Set the state to blocked and ask the scheduler to switch tasks,
1745 * this takes us off of the run queue until we are explicitly woken */
1746 2065
1747#if NUM_CORES > 1 2066 /* Set the list for explicit wakeup */
1748 /* Lock thread slot */ 2067 block_thread_on_l(current, STATE_BLOCKED);
1749 GET_THREAD_STATE(current);
1750#endif
1751 2068
1752#if CONFIG_CORELOCK == SW_CORELOCK 2069#ifdef HAVE_PRIORITY_SCHEDULING
1753 /* One branch optimized away during inlining */ 2070 if (current->blocker != NULL)
1754 if (nolock)
1755 { 2071 {
1756 block_thread_on_l_no_listlock((struct thread_entry **)list, 2072 /* Object supports PIP */
1757 current, STATE_BLOCKED); 2073 current = blocker_inherit_priority(current);
1758 } 2074 }
1759 else
1760#endif 2075#endif
1761 {
1762 block_thread_on_l(list, current, STATE_BLOCKED);
1763 }
1764
1765 switch_thread(current);
1766
1767 /* Our status should be STATE_RUNNING */
1768 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1769 "B:R->!*R", current);
1770}
1771
1772#if CONFIG_CORELOCK == SW_CORELOCK
1773/* Inline lock/nolock version of _block_thread into these functions */
1774void block_thread(struct thread_queue *tq)
1775{
1776 _block_thread(tq, false);
1777}
1778 2076
1779void block_thread_no_listlock(struct thread_entry **list) 2077 UNLOCK_THREAD(current);
1780{
1781 _block_thread((struct thread_queue *)list, true);
1782} 2078}
1783#endif /* CONFIG_CORELOCK */
1784 2079
1785/*--------------------------------------------------------------------------- 2080/*---------------------------------------------------------------------------
1786 * Block a thread on a blocking queue for a specified time interval or until 2081 * Block a thread on a blocking queue for a specified time interval or until
1787 * explicitly woken - whichever happens first. 2082 * explicitly woken - whichever happens first.
1788 * Caller with interrupt-accessible lists should disable interrupts first
1789 * and request that interrupt level be restored after switching out the
1790 * current thread.
1791 * 2083 *
1792 * INTERNAL: Intended for use by kernel objects and not for programs. 2084 * INTERNAL: Intended for use by kernel objects and not for programs.
1793 *--------------------------------------------------------------------------- 2085 *---------------------------------------------------------------------------
1794 */ 2086 */
1795void block_thread_w_tmo(struct thread_queue *list, int timeout) 2087void block_thread_w_tmo(struct thread_entry *current, int timeout)
1796{ 2088{
1797 /* Get the entry for the current running thread. */ 2089 /* Get the entry for the current running thread. */
1798 struct thread_entry *current = cores[CURRENT_CORE].running; 2090 LOCK_THREAD(current);
1799
1800#if NUM_CORES > 1
1801 /* Lock thread slot */
1802 GET_THREAD_STATE(current);
1803#endif
1804 2091
1805 /* Set the state to blocked with the specified timeout */ 2092 /* Set the state to blocked with the specified timeout */
1806 current->tmo_tick = current_tick + timeout; 2093 current->tmo_tick = current_tick + timeout;
2094
1807 /* Set the list for explicit wakeup */ 2095 /* Set the list for explicit wakeup */
1808 block_thread_on_l(list, current, STATE_BLOCKED_W_TMO); 2096 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1809 2097
1810 /* Now force a task switch and block until we have been woken up 2098#ifdef HAVE_PRIORITY_SCHEDULING
1811 * by another thread or timeout is reached - whichever happens first */ 2099 if (current->blocker != NULL)
1812 switch_thread(current); 2100 {
2101 /* Object supports PIP */
2102 current = blocker_inherit_priority(current);
2103 }
2104#endif
1813 2105
1814 /* Our status should be STATE_RUNNING */ 2106 UNLOCK_THREAD(current);
1815 THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
1816 "T:R->!*R", current);
1817} 2107}
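For orientation, the standalone toy below (its types and helpers are invented, not the kernel's API) walks the state transitions these functions arrange: a thread parks itself as STATE_BLOCKED_W_TMO with an absolute tmo_tick, and either an explicit wakeup or the timeout check returns it to STATE_RUNNING, whichever comes first.

#include <stdio.h>

/* Toy state machine for one thread, mirroring the states used here. */
enum state { STATE_RUNNING, STATE_BLOCKED, STATE_BLOCKED_W_TMO, STATE_SLEEPING };

struct toy_thread {
    enum state state;
    long tmo_tick;       /* absolute wakeup deadline */
};

/* Block with timeout: what block_thread_on_l records for the scheduler. */
static void toy_block_w_tmo(struct toy_thread *t, long now, int ticks)
{
    t->tmo_tick = now + ticks;
    t->state = STATE_BLOCKED_W_TMO;
}

/* An explicit wakeup beats the timeout... */
static void toy_wakeup(struct toy_thread *t)
{
    if (t->state == STATE_BLOCKED || t->state == STATE_BLOCKED_W_TMO)
        t->state = STATE_RUNNING;
}

/* ...otherwise the periodic timeout check brings the thread back. */
static void toy_check_tmo(struct toy_thread *t, long now)
{
    if (t->state == STATE_BLOCKED_W_TMO && now >= t->tmo_tick)
        t->state = STATE_RUNNING;
}

int main(void)
{
    struct toy_thread t = { STATE_RUNNING, 0 };

    toy_block_w_tmo(&t, 100, 5);        /* wait until tick 105 at the latest */
    toy_check_tmo(&t, 103);
    printf("at 103: %d\n", t.state);    /* still 2 (STATE_BLOCKED_W_TMO) */

    toy_wakeup(&t);                     /* explicit wakeup arrives first */
    printf("after wakeup: %d\n", t.state);   /* 0 (STATE_RUNNING) */

    toy_check_tmo(&t, 105);             /* late timeout check is now a no-op */
    printf("at 105: %d\n", t.state);    /* still 0 */
    return 0;
}
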
1818 2108
1819/*--------------------------------------------------------------------------- 2109/*---------------------------------------------------------------------------
1820 * Explicitly wakeup a thread on a blocking queue. Has no effect on threads 2110 * Explicitly wakeup a thread on a blocking queue. Only affects threads in
1821 * that called sleep(). 2111 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1822 * Caller with interrupt-accessible lists should disable interrupts first. 2112 *
1823 * This code should be considered a critical section by the caller. 2113 * This code should be considered a critical section by the caller meaning
2114 * that the object's corelock should be held.
1824 * 2115 *
1825 * INTERNAL: Intended for use by kernel objects and not for programs. 2116 * INTERNAL: Intended for use by kernel objects and not for programs.
1826 *--------------------------------------------------------------------------- 2117 *---------------------------------------------------------------------------
1827 */ 2118 */
1828IF_SWCL(static inline) struct thread_entry * _wakeup_thread( 2119unsigned int wakeup_thread(struct thread_entry **list)
1829 struct thread_queue *list IF_SWCL(, const bool nolock))
1830{ 2120{
1831 struct thread_entry *t; 2121 struct thread_entry *thread = *list;
1832 struct thread_entry *thread; 2122 unsigned int result = THREAD_NONE;
1833 unsigned state;
1834
1835 /* Wake up the last thread first. */
1836#if CONFIG_CORELOCK == SW_CORELOCK
1837 /* One branch optimized away during inlining */
1838 if (nolock)
1839 {
1840 t = list->queue;
1841 }
1842 else
1843#endif
1844 {
1845 t = LOCK_LIST(list);
1846 }
1847 2123
1848 /* Check if there is a blocked thread at all. */ 2124 /* Check if there is a blocked thread at all. */
1849 if (t == NULL) 2125 if (thread == NULL)
1850 { 2126 return result;
1851#if CONFIG_CORELOCK == SW_CORELOCK
1852 if (!nolock)
1853#endif
1854 {
1855 UNLOCK_LIST(list, NULL);
1856 }
1857 return NULL;
1858 }
1859 2127
1860 thread = t; 2128 LOCK_THREAD(thread);
1861
1862#if NUM_CORES > 1
1863#if CONFIG_CORELOCK == SW_CORELOCK
1864 if (nolock)
1865 {
1866 /* Lock thread only, not list */
1867 state = GET_THREAD_STATE(thread);
1868 }
1869 else
1870#endif
1871 {
1872 /* This locks in reverse order from other routines so a retry in the
1873 correct order may be needed */
1874 state = TRY_GET_THREAD_STATE(thread);
1875 if (state == STATE_BUSY)
1876 {
1877 /* Unlock list and retry slot, then list */
1878 UNLOCK_LIST(list, t);
1879 state = GET_THREAD_STATE(thread);
1880 t = LOCK_LIST(list);
1881 /* Be sure thread still exists here - it couldn't have re-added
1882 itself if it was woken elsewhere because this function is
1883 serialized within the object that owns the list. */
1884 if (thread != t)
1885 {
1886 /* Thread disappeared :( */
1887 UNLOCK_LIST(list, t);
1888 UNLOCK_THREAD(thread, state);
1889 return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
1890 }
1891 }
1892 }
1893#else /* NUM_CORES == 1 */
1894 state = GET_THREAD_STATE(thread);
1895#endif /* NUM_CORES */
1896 2129
1897 /* Determine thread's current state. */ 2130 /* Determine thread's current state. */
1898 switch (state) 2131 switch (thread->state)
1899 { 2132 {
1900 case STATE_BLOCKED: 2133 case STATE_BLOCKED:
1901 case STATE_BLOCKED_W_TMO: 2134 case STATE_BLOCKED_W_TMO:
1902 /* Remove thread from object's blocked list - select t or list depending 2135 remove_from_list_l(list, thread);
1903 on locking type at compile time */ 2136
1904 REMOVE_FROM_LIST_L_SELECT(t, list, thread); 2137 result = THREAD_OK;
1905#if CONFIG_CORELOCK == SW_CORELOCK 2138
1906 /* Statment optimized away during inlining if nolock != false */ 2139#ifdef HAVE_PRIORITY_SCHEDULING
1907 if (!nolock) 2140 struct thread_entry *current;
1908#endif 2141 struct blocker *bl = thread->blocker;
2142
2143 if (bl == NULL)
1909 { 2144 {
1910 UNLOCK_LIST(list, t); /* Unlock list - removal complete */ 2145 /* No inheritance - just boost the thread by aging */
2146 thread->skip_count = thread->priority;
2147 current = cores[CURRENT_CORE].running;
2148 }
2149 else
2150 {
2151 /* Call the specified unblocking PIP */
2152 current = bl->wakeup_protocol(thread);
1911 } 2153 }
1912 2154
1913#ifdef HAVE_PRIORITY_SCHEDULING 2155 if (current != NULL && thread->priority < current->priority
1914 /* Give the task a kick to avoid a stall after wakeup. 2156 IF_COP( && thread->core == current->core ))
1915 Not really proper treatment - TODO later. */ 2157 {
1916 thread->last_run = current_tick - 8*LOWEST_PRIORITY; 2158 /* Woken thread is higher priority and exists on the same CPU core;
1917#endif 2159 * recommend a task switch. Knowing if this is an interrupt call
2160 * would be helpful here. */
2161 result |= THREAD_SWITCH;
2162 }
2163#endif /* HAVE_PRIORITY_SCHEDULING */
2164
1918 core_schedule_wakeup(thread); 2165 core_schedule_wakeup(thread);
1919 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); 2166 break;
1920 return thread; 2167
1921 default: 2168 /* Nothing to do. State is not blocked. */
1922 /* Nothing to do. State is not blocked. */
1923#if THREAD_EXTRA_CHECKS 2169#if THREAD_EXTRA_CHECKS
2170 default:
1924 THREAD_PANICF("wakeup_thread->block invalid", thread); 2171 THREAD_PANICF("wakeup_thread->block invalid", thread);
1925 case STATE_RUNNING: 2172 case STATE_RUNNING:
1926 case STATE_KILLED: 2173 case STATE_KILLED:
2174 break;
1927#endif 2175#endif
1928#if CONFIG_CORELOCK == SW_CORELOCK
1929 /* Statement optimized away during inlining if nolock != false */
1930 if (!nolock)
1931#endif
1932 {
1933 UNLOCK_LIST(list, t); /* Unlock the object's list */
1934 }
1935 UNLOCK_THREAD(thread, state); /* Unlock thread slot */
1936 return NULL;
1937 } 2176 }
1938}
1939 2177
1940#if CONFIG_CORELOCK == SW_CORELOCK 2178 UNLOCK_THREAD(thread);
1941/* Inline lock/nolock version of _wakeup_thread into these functions */ 2179 return result;
1942struct thread_entry * wakeup_thread(struct thread_queue *tq)
1943{
1944 return _wakeup_thread(tq, false);
1945} 2180}
1946 2181
1947struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list) 2182/*---------------------------------------------------------------------------
2183 * Wake up an entire queue of threads - returns the bitwise OR of the return
2184 * bitmask from each operation, or THREAD_NONE if nothing was awakened. The object owning
2185 * the queue must be locked first.
2186 *
2187 * INTERNAL: Intended for use by kernel objects and not for programs.
2188 *---------------------------------------------------------------------------
2189 */
2190unsigned int thread_queue_wake(struct thread_entry **list)
1948{ 2191{
1949 return _wakeup_thread((struct thread_queue *)list, true); 2192 unsigned result = THREAD_NONE;
2193
2194 for (;;)
2195 {
2196 unsigned int rc = wakeup_thread(list);
2197
2198 if (rc == THREAD_NONE)
2199 break; /* No more threads */
2200
2201 result |= rc;
2202 }
2203
2204 return result;
1950} 2205}
1951#endif /* CONFIG_CORELOCK */
1952 2206
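A minimal sketch of how a kernel object might drive the reworked wakeup API, assuming a hypothetical object type and helper (my_object, my_object_signal); the IRQ/corelock discipline and the THREAD_SWITCH hint follow the comments above.

/* Hypothetical kernel object - illustrative sketch only */
struct my_object
{
    struct thread_entry *queue;   /* threads blocked on this object */
    IF_COP( struct corelock cl; ) /* object corelock on multicore builds */
};

static void my_object_signal(struct my_object *obj)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    IF_COP( corelock_lock(&obj->cl); )

    /* Wake one waiter; the result carries the THREAD_OK/THREAD_SWITCH bits */
    unsigned int result = wakeup_thread(&obj->queue);

    IF_COP( corelock_unlock(&obj->cl); )
    set_irq_level(oldlevel);

    if (result & THREAD_SWITCH)
        switch_thread(); /* a higher-priority thread became runnable */
}
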
1953/*--------------------------------------------------------------------------- 2207/*---------------------------------------------------------------------------
1954 * Find an empty thread slot or MAXTHREADS if none found. The slot returned 2208 * Find an empty thread slot or MAXTHREADS if none found. The slot returned
1955 * will be locked on multicore. 2209 * will be locked on multicore.
1956 *--------------------------------------------------------------------------- 2210 *---------------------------------------------------------------------------
1957 */ 2211 */
1958static int find_empty_thread_slot(void) 2212static struct thread_entry * find_empty_thread_slot(void)
1959{ 2213{
1960#if NUM_CORES > 1 2214 /* Any slot could be on an interrupt-accessible list */
1961 /* Any slot could be on an IRQ-accessible list */ 2215 IF_COP( int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); )
1962 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2216 struct thread_entry *thread = NULL;
1963#endif
1964 /* Thread slots are not locked on single core */
1965
1966 int n; 2217 int n;
1967 2218
1968 for (n = 0; n < MAXTHREADS; n++) 2219 for (n = 0; n < MAXTHREADS; n++)
1969 { 2220 {
1970 /* Obtain current slot state - lock it on multicore */ 2221 /* Obtain current slot state - lock it on multicore */
1971 unsigned state = GET_THREAD_STATE(&threads[n]); 2222 struct thread_entry *t = &threads[n];
2223 LOCK_THREAD(t);
1972 2224
1973 if (state == STATE_KILLED 2225 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1974#if NUM_CORES > 1
1975 && threads[n].name != THREAD_DESTRUCT
1976#endif
1977 )
1978 { 2226 {
1979 /* Slot is empty - leave it locked and caller will unlock */ 2227 /* Slot is empty - leave it locked and caller will unlock */
2228 thread = t;
1980 break; 2229 break;
1981 } 2230 }
1982 2231
1983 /* Finished examining slot - no longer busy - unlock on multicore */ 2232 /* Finished examining slot - no longer busy - unlock on multicore */
1984 UNLOCK_THREAD(&threads[n], state); 2233 UNLOCK_THREAD(t);
1985 } 2234 }
1986 2235
1987#if NUM_CORES > 1 2236 IF_COP( set_irq_level(oldlevel); ) /* Reenable interrupts - this slot is
1988 set_irq_level(oldlevel); /* Reenable interrups - this slot is 2237 not accessible to them yet */
1989 not accesible to them yet */ 2238 return thread;
1990#endif
1991
1992 return n;
1993} 2239}
1994 2240
1995 2241
@@ -2000,65 +2246,68 @@ static int find_empty_thread_slot(void)
2000 */ 2246 */
2001void core_idle(void) 2247void core_idle(void)
2002{ 2248{
2003#if NUM_CORES > 1 2249 IF_COP( const unsigned int core = CURRENT_CORE; )
2004 const unsigned int core = CURRENT_CORE;
2005#endif
2006 set_irq_level(HIGHEST_IRQ_LEVEL); 2250 set_irq_level(HIGHEST_IRQ_LEVEL);
2007 core_sleep(IF_COP(core)); 2251 core_sleep(IF_COP(core));
2008} 2252}
2009 2253
2010/*--------------------------------------------------------------------------- 2254/*---------------------------------------------------------------------------
2011 * Create a thread 2255 * Create a thread. If using a dual core architecture, specify which core to
2012 * If using a dual core architecture, specify which core to start the thread 2256 * start the thread on.
2013 * on, and whether to fall back to the other core if it can't be created 2257 *
2014 * Return ID if context area could be allocated, else NULL. 2258 * Return ID if context area could be allocated, else NULL.
2015 *--------------------------------------------------------------------------- 2259 *---------------------------------------------------------------------------
2016 */ 2260 */
2017struct thread_entry* 2261struct thread_entry*
2018 create_thread(void (*function)(void), void* stack, int stack_size, 2262 create_thread(void (*function)(void), void* stack, size_t stack_size,
2019 unsigned flags, const char *name 2263 unsigned flags, const char *name
2020 IF_PRIO(, int priority) 2264 IF_PRIO(, int priority)
2021 IF_COP(, unsigned int core)) 2265 IF_COP(, unsigned int core))
2022{ 2266{
2023 unsigned int i; 2267 unsigned int i;
2024 unsigned int stacklen; 2268 unsigned int stack_words;
2025 unsigned int *stackptr; 2269 uintptr_t stackptr, stackend;
2026 int slot;
2027 struct thread_entry *thread; 2270 struct thread_entry *thread;
2028 unsigned state; 2271 unsigned state;
2272 int oldlevel;
2029 2273
2030 slot = find_empty_thread_slot(); 2274 thread = find_empty_thread_slot();
2031 if (slot >= MAXTHREADS) 2275 if (thread == NULL)
2032 { 2276 {
2033 return NULL; 2277 return NULL;
2034 } 2278 }
2035 2279
2280 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2281
2036 /* Munge the stack to make it easy to spot stack overflows */ 2282 /* Munge the stack to make it easy to spot stack overflows */
2037 stacklen = stack_size / sizeof(int); 2283 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
2038 stackptr = stack; 2284 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
2039 for(i = 0;i < stacklen;i++) 2285 stack_size = stackend - stackptr;
2286 stack_words = stack_size / sizeof (uintptr_t);
2287
2288 for (i = 0; i < stack_words; i++)
2040 { 2289 {
2041 stackptr[i] = DEADBEEF; 2290 ((uintptr_t *)stackptr)[i] = DEADBEEF;
2042 } 2291 }
2043 2292
2044 /* Store interesting information */ 2293 /* Store interesting information */
2045 thread = &threads[slot];
2046 thread->name = name; 2294 thread->name = name;
2047 thread->stack = stack; 2295 thread->stack = (uintptr_t *)stackptr;
2048 thread->stack_size = stack_size; 2296 thread->stack_size = stack_size;
2049 thread->bqp = NULL;
2050#if CONFIG_CORELOCK == SW_CORELOCK
2051 thread->bqnlp = NULL;
2052#endif
2053 thread->queue = NULL; 2297 thread->queue = NULL;
2298#ifdef HAVE_WAKEUP_EXT_CB
2299 thread->wakeup_ext_cb = NULL;
2300#endif
2054#ifdef HAVE_SCHEDULER_BOOSTCTRL 2301#ifdef HAVE_SCHEDULER_BOOSTCTRL
2055 thread->boosted = 0; 2302 thread->cpu_boost = 0;
2056#endif 2303#endif
2057#ifdef HAVE_PRIORITY_SCHEDULING 2304#ifdef HAVE_PRIORITY_SCHEDULING
2058 thread->priority_x = LOWEST_PRIORITY; 2305 memset(&thread->pdist, 0, sizeof(thread->pdist));
2306 thread->blocker = NULL;
2307 thread->base_priority = priority;
2059 thread->priority = priority; 2308 thread->priority = priority;
2060 thread->last_run = current_tick - priority * 8; 2309 thread->skip_count = priority;
2061 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY; 2310 prio_add_entry(&thread->pdist, priority);
2062#endif 2311#endif
2063 2312
2064#if NUM_CORES > 1 2313#if NUM_CORES > 1
@@ -2077,70 +2326,160 @@ struct thread_entry*
2077 state = (flags & CREATE_THREAD_FROZEN) ? 2326 state = (flags & CREATE_THREAD_FROZEN) ?
2078 STATE_FROZEN : STATE_RUNNING; 2327 STATE_FROZEN : STATE_RUNNING;
2079 2328
2080 /* Align stack to an even 32 bit boundary */ 2329 thread->context.sp = (typeof (thread->context.sp))stackend;
2081 thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
2082 2330
2083 /* Load the thread's context structure with needed startup information */ 2331 /* Load the thread's context structure with needed startup information */
2084 THREAD_STARTUP_INIT(core, thread, function); 2332 THREAD_STARTUP_INIT(core, thread, function);
2085 2333
2334 thread->state = state;
2335
2086 if (state == STATE_RUNNING) 2336 if (state == STATE_RUNNING)
2087 { 2337 core_schedule_wakeup(thread);
2088#if NUM_CORES > 1 2338
2089 if (core != CURRENT_CORE) 2339 UNLOCK_THREAD(thread);
2090 { 2340
2091 /* Next task switch on other core moves thread to running list */ 2341 set_irq_level(oldlevel);
2092 core_schedule_wakeup(thread);
2093 }
2094 else
2095#endif
2096 {
2097 /* Place on running list immediately */
2098 add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
2099 }
2100 }
2101 2342
2102 /* remove lock and set state */
2103 UNLOCK_THREAD_SET_STATE(thread, state);
2104
2105 return thread; 2343 return thread;
2106} 2344}
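
A minimal sketch of calling the new create_thread() prototype above; example_thread, example_stack and start_example_thread are hypothetical, and PRIORITY_USER_INTERFACE/CPU are used as plausible arguments for the IF_PRIO/IF_COP parameters.

#include "thread.h"
#include "kernel.h"

static long example_stack[1024 / sizeof(long)]; /* hypothetical stack */

static void example_thread(void)
{
    for (;;)
        sleep(HZ); /* do some periodic work once per second */
}

static void start_example_thread(void)
{
    struct thread_entry *t =
        create_thread(example_thread, example_stack, sizeof(example_stack),
                      0, "example"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
    if (t == NULL)
    {
        /* No free slot or unusable stack area - handle the failure */
    }
}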
2107 2345
2108#ifdef HAVE_SCHEDULER_BOOSTCTRL 2346#ifdef HAVE_SCHEDULER_BOOSTCTRL
2347/*---------------------------------------------------------------------------
2348 * Change the boost state of a thread boosting or unboosting the CPU
2349 * as required.
2350 *---------------------------------------------------------------------------
2351 */
2352static inline void boost_thread(struct thread_entry *thread, bool boost)
2353{
2354 if ((thread->cpu_boost != 0) != boost)
2355 {
2356 thread->cpu_boost = boost;
2357 cpu_boost(boost);
2358 }
2359}
2360
2109void trigger_cpu_boost(void) 2361void trigger_cpu_boost(void)
2110{ 2362{
2111 /* No IRQ disable nescessary since the current thread cannot be blocked
2112 on an IRQ-accessible list */
2113 struct thread_entry *current = cores[CURRENT_CORE].running; 2363 struct thread_entry *current = cores[CURRENT_CORE].running;
2114 unsigned state;
2115
2116 state = GET_THREAD_STATE(current);
2117 boost_thread(current, true); 2364 boost_thread(current, true);
2118 UNLOCK_THREAD(current, state);
2119
2120 (void)state;
2121} 2365}
2122 2366
2123void cancel_cpu_boost(void) 2367void cancel_cpu_boost(void)
2124{ 2368{
2125 struct thread_entry *current = cores[CURRENT_CORE].running; 2369 struct thread_entry *current = cores[CURRENT_CORE].running;
2126 unsigned state;
2127
2128 state = GET_THREAD_STATE(current);
2129 boost_thread(current, false); 2370 boost_thread(current, false);
2130 UNLOCK_THREAD(current, state);
2131
2132 (void)state;
2133} 2371}
2134#endif /* HAVE_SCHEDULER_BOOSTCTRL */ 2372#endif /* HAVE_SCHEDULER_BOOSTCTRL */
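
A minimal sketch of pairing the boost calls around a CPU-heavy section; do_heavy_work is a hypothetical helper.

static void do_heavy_work(void) /* hypothetical helper */
{
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    trigger_cpu_boost();  /* mark the current thread as wanting the full clock */
#endif

    /* ... CPU-intensive work ... */

#ifdef HAVE_SCHEDULER_BOOSTCTRL
    cancel_cpu_boost();   /* drop this thread's boost again */
#endif
}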
2135 2373
2136/*--------------------------------------------------------------------------- 2374/*---------------------------------------------------------------------------
2137 * Remove a thread from the scheduler. 2375 * Block the current thread until another thread terminates. A thread may
2376 * wait on itself to terminate which prevents it from running again and it
2377 * will need to be killed externally.
2378 * Parameter is the ID as returned from create_thread().
2379 *---------------------------------------------------------------------------
2380 */
2381void thread_wait(struct thread_entry *thread)
2382{
2383 struct thread_entry *current = cores[CURRENT_CORE].running;
2384
2385 if (thread == NULL)
2386 thread = current;
2387
2388 /* Lock thread-as-waitable-object lock */
2389 corelock_lock(&thread->waiter_cl);
2390
2391 /* Be sure it hasn't been killed yet */
2392 if (thread->state != STATE_KILLED)
2393 {
2394 IF_COP( current->obj_cl = &thread->waiter_cl; )
2395 current->bqp = &thread->queue;
2396
2397 set_irq_level(HIGHEST_IRQ_LEVEL);
2398 block_thread(current);
2399
2400 corelock_unlock(&thread->waiter_cl);
2401
2402 switch_thread();
2403 return;
2404 }
2405
2406 corelock_unlock(&thread->waiter_cl);
2407}
2408
2409/*---------------------------------------------------------------------------
2410 * Exit the current thread. The Right Way to Do Things (TM).
2411 *---------------------------------------------------------------------------
2412 */
2413void thread_exit(void)
2414{
2415 const unsigned int core = CURRENT_CORE;
2416 struct thread_entry *current = cores[core].running;
2417
2418 /* Cancel CPU boost if any */
2419 cancel_cpu_boost();
2420
2421 set_irq_level(HIGHEST_IRQ_LEVEL);
2422
2423 corelock_lock(&current->waiter_cl);
2424 LOCK_THREAD(current);
2425
2426#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
2427 if (current->name == THREAD_DESTRUCT)
2428 {
2429 /* Thread being killed - become a waiter */
2430 UNLOCK_THREAD(current);
2431 corelock_unlock(&current->waiter_cl);
2432 thread_wait(current);
2433 THREAD_PANICF("thread_exit->WK:*R", current);
2434 }
2435#endif
2436
2437#ifdef HAVE_PRIORITY_SCHEDULING
2438 check_for_obj_waiters("thread_exit", current);
2439#endif
2440
2441 if (current->tmo.prev != NULL)
2442 {
2443 /* Cancel pending timeout list removal */
2444 remove_from_list_tmo(current);
2445 }
2446
2447 /* Switch tasks and never return */
2448 block_thread_on_l(current, STATE_KILLED);
2449
2450#if NUM_CORES > 1
2451 /* Switch to the idle stack if not on the main core (where "main"
2452 * runs) - we can hope gcc doesn't need the old stack beyond this
2453 * point. */
2454 if (core != CPU)
2455 {
2456 switch_to_idle_stack(core);
2457 }
2458
2459 flush_icache();
2460#endif
2461 current->name = NULL;
2462
2463 /* Signal this thread */
2464 thread_queue_wake(&current->queue);
2465 corelock_unlock(&current->waiter_cl);
2466 /* Slot must be unusable until thread is really gone */
2467 UNLOCK_THREAD_AT_TASK_SWITCH(current);
2468 switch_thread();
2469 /* This should never and must never be reached - if it is, the
2470 * state is corrupted */
2471 THREAD_PANICF("thread_exit->K:*R", current);
2472}
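
A minimal sketch of the wait/exit pairing: a one-shot worker ends with thread_exit() and its creator blocks in thread_wait() until then. one_shot_worker, worker_stack and run_job_and_wait are hypothetical.

static long worker_stack[1024 / sizeof(long)]; /* hypothetical stack */

static void one_shot_worker(void)
{
    /* ... perform the job ... */
    thread_exit(); /* never returns; wakes any thread_wait()ers on this slot */
}

static void run_job_and_wait(void)
{
    struct thread_entry *id =
        create_thread(one_shot_worker, worker_stack, sizeof(worker_stack),
                      0, "one shot"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
    if (id != NULL)
        thread_wait(id); /* blocks until the worker calls thread_exit() */
}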
2473
2474#ifdef ALLOW_REMOVE_THREAD
2475/*---------------------------------------------------------------------------
2476 * Remove a thread from the scheduler. Not The Right Way to Do Things in
2477 * normal programs.
2478 *
2138 * Parameter is the ID as returned from create_thread(). 2479 * Parameter is the ID as returned from create_thread().
2139 * 2480 *
2140 * Use with care on threads that are not under careful control as this may 2481 * Use with care on threads that are not under careful control as this may
2141 * leave various objects in an undefined state. When trying to kill a thread 2482 * leave various objects in an undefined state.
2142 * on another processor, be sure you know what it's doing and won't be
2143 * switching around itself.
2144 *--------------------------------------------------------------------------- 2483 *---------------------------------------------------------------------------
2145 */ 2484 */
2146void remove_thread(struct thread_entry *thread) 2485void remove_thread(struct thread_entry *thread)
@@ -2149,17 +2488,27 @@ void remove_thread(struct thread_entry *thread)
2149 /* core is not constant here because of core switching */ 2488 /* core is not constant here because of core switching */
2150 unsigned int core = CURRENT_CORE; 2489 unsigned int core = CURRENT_CORE;
2151 unsigned int old_core = NUM_CORES; 2490 unsigned int old_core = NUM_CORES;
2491 struct corelock *ocl = NULL;
2152#else 2492#else
2153 const unsigned int core = CURRENT_CORE; 2493 const unsigned int core = CURRENT_CORE;
2154#endif 2494#endif
2495 struct thread_entry *current = cores[core].running;
2496
2155 unsigned state; 2497 unsigned state;
2156 int oldlevel; 2498 int oldlevel;
2157 2499
2158 if (thread == NULL) 2500 if (thread == NULL)
2159 thread = cores[core].running; 2501 thread = current;
2502
2503 if (thread == current)
2504 thread_exit(); /* Current thread - do normal exit */
2160 2505
2161 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2506 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2162 state = GET_THREAD_STATE(thread); 2507
2508 corelock_lock(&thread->waiter_cl);
2509 LOCK_THREAD(thread);
2510
2511 state = thread->state;
2163 2512
2164 if (state == STATE_KILLED) 2513 if (state == STATE_KILLED)
2165 { 2514 {
@@ -2167,50 +2516,49 @@ void remove_thread(struct thread_entry *thread)
2167 } 2516 }
2168 2517
2169#if NUM_CORES > 1 2518#if NUM_CORES > 1
2519 if (thread->name == THREAD_DESTRUCT)
2520 {
2521 /* Thread being killed - become a waiter */
2522 UNLOCK_THREAD(thread);
2523 corelock_unlock(&thread->waiter_cl);
2524 set_irq_level(oldlevel);
2525 thread_wait(thread);
2526 return;
2527 }
2528
2529 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
2530
2531#ifdef HAVE_PRIORITY_SCHEDULING
2532 check_for_obj_waiters("remove_thread", thread);
2533#endif
2534
2170 if (thread->core != core) 2535 if (thread->core != core)
2171 { 2536 {
2172 /* Switch cores and safely extract the thread there */ 2537 /* Switch cores and safely extract the thread there */
2173 /* Slot HAS to be unlocked or a deadlock could occur - potential livelock 2538 /* Slot HAS to be unlocked or a deadlock could occur which means other
2174 condition if the thread runs away to another processor. */ 2539 * threads have to be guided into becoming thread waiters if they
2540 * attempt to remove it. */
2175 unsigned int new_core = thread->core; 2541 unsigned int new_core = thread->core;
2176 const char *old_name = thread->name;
2177 2542
2178 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */ 2543 corelock_unlock(&thread->waiter_cl);
2179 UNLOCK_THREAD(thread, state); 2544
2545 UNLOCK_THREAD(thread);
2180 set_irq_level(oldlevel); 2546 set_irq_level(oldlevel);
2181 2547
2182 old_core = switch_core(new_core); 2548 old_core = switch_core(new_core);
2183 2549
2184 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2550 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2185 state = GET_THREAD_STATE(thread);
2186
2187 core = new_core;
2188
2189 if (state == STATE_KILLED)
2190 {
2191 /* Thread suicided before we could kill it */
2192 goto thread_killed;
2193 }
2194
2195 /* Reopen slot - it's locked again anyway */
2196 thread->name = old_name;
2197 2551
2198 if (thread->core != core) 2552 corelock_lock(&thread->waiter_cl);
2199 { 2553 LOCK_THREAD(thread);
2200 /* We won't play thread tag - just forget it */
2201 UNLOCK_THREAD(thread, state);
2202 set_irq_level(oldlevel);
2203 goto thread_kill_abort;
2204 }
2205 2554
2555 state = thread->state;
2556 core = new_core;
2206 /* Perform the extraction and switch ourselves back to the original 2557 /* Perform the extraction and switch ourselves back to the original
2207 processor */ 2558 processor */
2208 } 2559 }
2209#endif /* NUM_CORES > 1 */ 2560#endif /* NUM_CORES > 1 */
2210 2561
2211#ifdef HAVE_PRIORITY_SCHEDULING
2212 cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
2213#endif
2214 if (thread->tmo.prev != NULL) 2562 if (thread->tmo.prev != NULL)
2215 { 2563 {
2216 /* Clean thread off the timeout list if a timeout check hasn't 2564 /* Clean thread off the timeout list if a timeout check hasn't
@@ -2218,87 +2566,86 @@ void remove_thread(struct thread_entry *thread)
2218 remove_from_list_tmo(thread); 2566 remove_from_list_tmo(thread);
2219 } 2567 }
2220 2568
2569#ifdef HAVE_SCHEDULER_BOOSTCTRL
2570 /* Cancel CPU boost if any */
2221 boost_thread(thread, false); 2571 boost_thread(thread, false);
2222
2223 if (thread == cores[core].running)
2224 {
2225 /* Suicide - thread has unconditional rights to do this */
2226 /* Maintain locks until switch-out */
2227 block_thread_on_l(NULL, thread, STATE_KILLED);
2228
2229#if NUM_CORES > 1
2230 /* Switch to the idle stack if not on the main core (where "main"
2231 * runs) */
2232 if (core != CPU)
2233 {
2234 switch_to_idle_stack(core);
2235 }
2236
2237 flush_icache();
2238#endif 2572#endif
2239 /* Signal this thread */
2240 thread_queue_wake_no_listlock(&thread->queue);
2241 /* Switch tasks and never return */
2242 switch_thread(thread);
2243 /* This should never and must never be reached - if it is, the
2244 * state is corrupted */
2245 THREAD_PANICF("remove_thread->K:*R", thread);
2246 }
2247 2573
2248#if NUM_CORES > 1 2574IF_COP( retry_state: )
2249 if (thread->name == THREAD_DESTRUCT)
2250 {
2251 /* Another core is doing this operation already */
2252 UNLOCK_THREAD(thread, state);
2253 set_irq_level(oldlevel);
2254 return;
2255 }
2256#endif
2257 if (cores[core].waking.queue != NULL)
2258 {
2259 /* Get any threads off the waking list and onto the running
2260 * list first - waking and running cannot be distinguished by
2261 * state */
2262 core_perform_wakeup(IF_COP(core));
2263 }
2264 2575
2265 switch (state) 2576 switch (state)
2266 { 2577 {
2267 case STATE_RUNNING: 2578 case STATE_RUNNING:
2579 RTR_LOCK(core);
2268 /* Remove thread from ready to run tasks */ 2580 /* Remove thread from ready to run tasks */
2269 remove_from_list_l(&cores[core].running, thread); 2581 remove_from_list_l(&cores[core].running, thread);
2582 rtr_subtract_entry(core, thread->priority);
2583 RTR_UNLOCK(core);
2270 break; 2584 break;
2271 case STATE_BLOCKED: 2585 case STATE_BLOCKED:
2272 case STATE_BLOCKED_W_TMO: 2586 case STATE_BLOCKED_W_TMO:
2273 /* Remove thread from the queue it's blocked on - including its 2587 /* Remove thread from the queue it's blocked on - including its
2274 * own if waiting there */ 2588 * own if waiting there */
2275#if CONFIG_CORELOCK == SW_CORELOCK 2589#if NUM_CORES > 1
2276 /* One or the other will be valid */ 2590 if (&thread->waiter_cl != thread->obj_cl)
2277 if (thread->bqp == NULL)
2278 { 2591 {
2279 remove_from_list_l(thread->bqnlp, thread); 2592 ocl = thread->obj_cl;
2593
2594 if (corelock_try_lock(ocl) == 0)
2595 {
2596 UNLOCK_THREAD(thread);
2597 corelock_lock(ocl);
2598 LOCK_THREAD(thread);
2599
2600 if (thread->state != state)
2601 {
2602 /* Something woke the thread */
2603 state = thread->state;
2604 corelock_unlock(ocl);
2605 goto retry_state;
2606 }
2607 }
2280 } 2608 }
2281 else 2609#endif
2282#endif /* CONFIG_CORELOCK */ 2610 remove_from_list_l(thread->bqp, thread);
2611
2612#ifdef HAVE_WAKEUP_EXT_CB
2613 if (thread->wakeup_ext_cb != NULL)
2614 thread->wakeup_ext_cb(thread);
2615#endif
2616
2617#ifdef HAVE_PRIORITY_SCHEDULING
2618 if (thread->blocker != NULL)
2283 { 2619 {
2284 remove_from_list_l_locked(thread->bqp, thread); 2620 /* Remove thread's priority influence from its chain */
2621 wakeup_priority_protocol_release(thread);
2285 } 2622 }
2623#endif
2624
2625#if NUM_CORES > 1
2626 if (ocl != NULL)
2627 corelock_unlock(ocl);
2628#endif
2286 break; 2629 break;
2287 /* Otherwise thread is killed or is frozen and hasn't run yet */ 2630 /* Otherwise thread is frozen and hasn't run yet */
2288 } 2631 }
2289 2632
2633 thread->state = STATE_KILLED;
2634
2290 /* If thread was waiting on itself, it will have been removed above. 2635 /* If thread was waiting on itself, it will have been removed above.
2291 * The wrong order would result in waking the thread first and deadlocking 2636 * The wrong order would result in waking the thread first and deadlocking
2292 * since the slot is already locked. */ 2637 * since the slot is already locked. */
2293 thread_queue_wake_no_listlock(&thread->queue); 2638 thread_queue_wake(&thread->queue);
2639
2640 thread->name = NULL;
2294 2641
2295thread_killed: /* Thread was already killed */ 2642thread_killed: /* Thread was already killed */
2296 /* Removal complete - safe to unlock state and reenable interrupts */ 2643 /* Removal complete - safe to unlock and reenable interrupts */
2297 UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED); 2644 corelock_unlock(&thread->waiter_cl);
2645 UNLOCK_THREAD(thread);
2298 set_irq_level(oldlevel); 2646 set_irq_level(oldlevel);
2299 2647
2300#if NUM_CORES > 1 2648#if NUM_CORES > 1
2301thread_kill_abort: /* Something stopped us from killing the thread */
2302 if (old_core < NUM_CORES) 2649 if (old_core < NUM_CORES)
2303 { 2650 {
2304 /* Did a removal on another processor's thread - switch back to 2651 /* Did a removal on another processor's thread - switch back to
@@ -2307,114 +2654,147 @@ thread_kill_abort: /* Something stopped us from killing the thread */
2307 } 2654 }
2308#endif 2655#endif
2309} 2656}
2657#endif /* ALLOW_REMOVE_THREAD */
2310 2658
2659#ifdef HAVE_PRIORITY_SCHEDULING
2311/*--------------------------------------------------------------------------- 2660/*---------------------------------------------------------------------------
2312 * Block the current thread until another thread terminates. A thread may 2661 * Sets the thread's relative base priority for the core it runs on. Any
2313 * wait on itself to terminate which prevents it from running again and it 2662 * needed inheritance changes also may happen.
2314 * will need to be killed externally.
2315 * Parameter is the ID as returned from create_thread().
2316 *--------------------------------------------------------------------------- 2663 *---------------------------------------------------------------------------
2317 */ 2664 */
2318void thread_wait(struct thread_entry *thread) 2665int thread_set_priority(struct thread_entry *thread, int priority)
2319{ 2666{
2320 const unsigned int core = CURRENT_CORE; 2667 int old_base_priority = -1;
2321 struct thread_entry *current = cores[core].running; 2668
2322 unsigned thread_state; 2669 /* A little safety measure */
2323#if NUM_CORES > 1 2670 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
2324 int oldlevel; 2671 return -1;
2325 unsigned current_state;
2326#endif
2327 2672
2328 if (thread == NULL) 2673 if (thread == NULL)
2329 thread = current; 2674 thread = cores[CURRENT_CORE].running;
2330 2675
2331#if NUM_CORES > 1 2676 /* Thread could be on any list and therefore on an interrupt accessible
2332 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2677 one - disable interrupts */
2333#endif 2678 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2334 2679
2335 thread_state = GET_THREAD_STATE(thread); 2680 LOCK_THREAD(thread);
2336 2681
2337#if NUM_CORES > 1 2682 /* Make sure it's not killed */
2338 /* We can't lock the same slot twice. The waitee will also lock itself 2683 if (thread->state != STATE_KILLED)
2339 first then the thread slots that will be locked and woken in turn.
2340 The same order must be observed here as well. */
2341 if (thread == current)
2342 {
2343 current_state = thread_state;
2344 }
2345 else
2346 { 2684 {
2347 current_state = GET_THREAD_STATE(current); 2685 int old_priority = thread->priority;
2348 }
2349#endif
2350 2686
2351 if (thread_state != STATE_KILLED) 2687 old_base_priority = thread->base_priority;
2352 { 2688 thread->base_priority = priority;
2353 /* Unlock the waitee state at task switch - not done for self-wait 2689
2354 because the would double-unlock the state and potentially 2690 prio_move_entry(&thread->pdist, old_base_priority, priority);
2355 corrupt another's busy assert on the slot */ 2691 priority = find_first_set_bit(thread->pdist.mask);
2356 if (thread != current) 2692
2693 if (old_priority == priority)
2357 { 2694 {
2358#if CONFIG_CORELOCK == SW_CORELOCK 2695 /* No priority change - do nothing */
2359 cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
2360 cores[core].blk_ops.thread = thread;
2361#elif CONFIG_CORELOCK == CORELOCK_SWAP
2362 cores[core].blk_ops.flags |= TBOP_SET_VARu8;
2363 cores[core].blk_ops.var_u8p = &thread->state;
2364 cores[core].blk_ops.var_u8v = thread_state;
2365#endif
2366 } 2696 }
2367 block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED); 2697 else if (thread->state == STATE_RUNNING)
2368 switch_thread(current); 2698 {
2369 return; 2699 /* This thread is running - change location on the run
2370 } 2700 * queue. No transitive inheritance needed. */
2701 set_running_thread_priority(thread, priority);
2702 }
2703 else
2704 {
2705 thread->priority = priority;
2706
2707 if (thread->blocker != NULL)
2708 {
2709 /* Bubble new priority down the chain */
2710 struct blocker *bl = thread->blocker; /* Blocker struct */
2711 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2712 struct thread_entry * const tstart = thread; /* Initial thread */
2713 const int highest = MIN(priority, old_priority); /* Higher of new or old */
2371 2714
2372 /* Unlock both slots - obviously the current thread can't have 2715 for (;;)
2373 STATE_KILLED so the above if clause will always catch a thread 2716 {
2374 waiting on itself */ 2717 struct thread_entry *next; /* Next thread to check */
2718 int bl_pr; /* Highest blocked thread */
2719 int queue_pr; /* New highest blocked thread */
2375#if NUM_CORES > 1 2720#if NUM_CORES > 1
2376 UNLOCK_THREAD(current, current_state); 2721 /* Owner can change but thread cannot be dislodged - thread
2377 UNLOCK_THREAD(thread, thread_state); 2722 * may not be the first in the queue which allows other
2378 set_irq_level(oldlevel); 2723 * threads ahead in the list to be given ownership during the
2379#endif 2724 * operation. If thread is next then the waker will have to
2380} 2725 * wait for us and the owner of the object will remain fixed.
2726 * If we successfully grab the owner -- which at some point
2727 * is guaranteed -- then the queue remains fixed until we
2728 * pass by. */
2729 for (;;)
2730 {
2731 LOCK_THREAD(bl_t);
2381 2732
2382#ifdef HAVE_PRIORITY_SCHEDULING 2733 /* Double-check the owner - retry if it changed */
2383/*--------------------------------------------------------------------------- 2734 if (bl->thread == bl_t)
2384 * Sets the thread's relative priority for the core it runs on. 2735 break;
2385 *---------------------------------------------------------------------------
2386 */
2387int thread_set_priority(struct thread_entry *thread, int priority)
2388{
2389 unsigned old_priority = (unsigned)-1;
2390
2391 if (thread == NULL)
2392 thread = cores[CURRENT_CORE].running;
2393 2736
2394#if NUM_CORES > 1 2737 UNLOCK_THREAD(bl_t);
2395 /* Thread could be on any list and therefore on an interrupt accessible 2738 bl_t = bl->thread;
2396 one - disable interrupts */ 2739 }
2397 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2398#endif 2740#endif
2399 unsigned state = GET_THREAD_STATE(thread); 2741 bl_pr = bl->priority;
2400 2742
2401 /* Make sure it's not killed */ 2743 if (highest > bl_pr)
2402 if (state != STATE_KILLED) 2744 break; /* Object priority won't change */
2403 { 2745
2404 old_priority = thread->priority; 2746 /* This will include the thread being set */
2405 thread->priority = priority; 2747 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2406 cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY; 2748
2749 if (queue_pr == bl_pr)
2750 break; /* Object priority not changing */
2751
2752 /* Update thread boost for this object */
2753 bl->priority = queue_pr;
2754 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2755 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2756
2757 if (bl_t->priority == bl_pr)
2758 break; /* Blocking thread priority not changing */
2759
2760 if (bl_t->state == STATE_RUNNING)
2761 {
2762 /* Thread not blocked - we're done */
2763 set_running_thread_priority(bl_t, bl_pr);
2764 break;
2765 }
2766
2767 bl_t->priority = bl_pr;
2768 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2769
2770 if (bl == NULL)
2771 break; /* End of chain */
2772
2773 next = bl->thread;
2774
2775 if (next == tstart)
2776 break; /* Full-circle */
2777
2778 UNLOCK_THREAD(thread);
2779
2780 thread = bl_t;
2781 bl_t = next;
2782 } /* for (;;) */
2783
2784 UNLOCK_THREAD(bl_t);
2785 }
2786 }
2407 } 2787 }
2408 2788
2409#if NUM_CORES > 1 2789 UNLOCK_THREAD(thread);
2410 UNLOCK_THREAD(thread, state); 2790
2411 set_irq_level(oldlevel); 2791 set_irq_level(oldlevel);
2412#endif 2792
2413 return old_priority; 2793 return old_base_priority;
2414} 2794}
2415 2795
2416/*--------------------------------------------------------------------------- 2796/*---------------------------------------------------------------------------
2417 * Returns the current priority for a thread. 2797 * Returns the current base priority for a thread.
2418 *--------------------------------------------------------------------------- 2798 *---------------------------------------------------------------------------
2419 */ 2799 */
2420int thread_get_priority(struct thread_entry *thread) 2800int thread_get_priority(struct thread_entry *thread)
@@ -2423,64 +2803,26 @@ int thread_get_priority(struct thread_entry *thread)
2423 if (thread == NULL) 2803 if (thread == NULL)
2424 thread = cores[CURRENT_CORE].running; 2804 thread = cores[CURRENT_CORE].running;
2425 2805
2426 return (unsigned)thread->priority; 2806 return thread->base_priority;
2427} 2807}
2808#endif /* HAVE_PRIORITY_SCHEDULING */
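
A minimal sketch of raising the current thread's base priority around a latency-sensitive section, assuming the adjusted value stays within [HIGHEST_PRIORITY, LOWEST_PRIORITY] (lower numbers are higher priority, as the range check above implies); do_latency_sensitive_work is hypothetical.

#ifdef HAVE_PRIORITY_SCHEDULING
static void do_latency_sensitive_work(void) /* hypothetical helper */
{
    int old = thread_get_priority(NULL);  /* NULL means the current thread */

    thread_set_priority(NULL, old - 1);   /* numerically lower = higher priority */

    /* ... time-critical work; inheritance is rebalanced as needed ... */

    thread_set_priority(NULL, old);       /* restore the previous base priority */
}
#endif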
2428 2809
2429/*--------------------------------------------------------------------------- 2810/*---------------------------------------------------------------------------
2430 * Yield that guarantees thread execution once per round regardless of 2811 * Starts a frozen thread - similar semantics to wakeup_thread except that
2431 * thread's scheduler priority - basically a transient realtime boost 2812 * the thread is on no scheduler or wakeup queue at all. It exists simply by
2432 * without altering the scheduler's thread precedence. 2813 * virtue of the slot having a state of STATE_FROZEN.
2433 *
2434 * HACK ALERT! Search for "priority inheritance" for proper treatment.
2435 *--------------------------------------------------------------------------- 2814 *---------------------------------------------------------------------------
2436 */ 2815 */
2437void priority_yield(void)
2438{
2439 const unsigned int core = CURRENT_CORE;
2440 struct thread_entry *thread = cores[core].running;
2441 thread->priority_x = HIGHEST_PRIORITY;
2442 switch_thread(NULL);
2443 thread->priority_x = LOWEST_PRIORITY;
2444}
2445#endif /* HAVE_PRIORITY_SCHEDULING */
2446
2447/* Resumes a frozen thread - similar logic to wakeup_thread except that
2448 the thread is on no scheduler list at all. It exists simply by virtue of
2449 the slot having a state of STATE_FROZEN. */
2450void thread_thaw(struct thread_entry *thread) 2816void thread_thaw(struct thread_entry *thread)
2451{ 2817{
2452#if NUM_CORES > 1
2453 /* Thread could be on any list and therefore on an interrupt accessible
2454 one - disable interrupts */
2455 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2818 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2456#endif 2819 LOCK_THREAD(thread);
2457 unsigned state = GET_THREAD_STATE(thread);
2458 2820
2459 if (state == STATE_FROZEN) 2821 if (thread->state == STATE_FROZEN)
2460 { 2822 core_schedule_wakeup(thread);
2461 const unsigned int core = CURRENT_CORE;
2462#if NUM_CORES > 1
2463 if (thread->core != core)
2464 {
2465 core_schedule_wakeup(thread);
2466 }
2467 else
2468#endif
2469 {
2470 add_to_list_l(&cores[core].running, thread);
2471 }
2472
2473 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
2474#if NUM_CORES > 1
2475 set_irq_level(oldlevel);
2476#endif
2477 return;
2478 }
2479 2823
2480#if NUM_CORES > 1 2824 UNLOCK_THREAD(thread);
2481 UNLOCK_THREAD(thread, state);
2482 set_irq_level(oldlevel); 2825 set_irq_level(oldlevel);
2483#endif
2484} 2826}
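
A minimal sketch of the frozen-start pattern: create with CREATE_THREAD_FROZEN, finish any setup the thread depends on, then release it with thread_thaw(). frozen_worker, frozen_stack and start_frozen_worker are hypothetical.

static long frozen_stack[1024 / sizeof(long)]; /* hypothetical stack */
static void frozen_worker(void);               /* hypothetical entry point */

static void start_frozen_worker(void)
{
    struct thread_entry *t =
        create_thread(frozen_worker, frozen_stack, sizeof(frozen_stack),
                      CREATE_THREAD_FROZEN, "frozen worker"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));

    /* ... complete whatever state the thread needs before it may run ... */

    if (t != NULL)
        thread_thaw(t); /* STATE_FROZEN -> scheduled to run */
}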
2485 2827
2486/*--------------------------------------------------------------------------- 2828/*---------------------------------------------------------------------------
@@ -2501,21 +2843,31 @@ unsigned int switch_core(unsigned int new_core)
2501{ 2843{
2502 const unsigned int core = CURRENT_CORE; 2844 const unsigned int core = CURRENT_CORE;
2503 struct thread_entry *current = cores[core].running; 2845 struct thread_entry *current = cores[core].running;
2504 struct thread_entry *w;
2505 int oldlevel;
2506
2507 /* Interrupts can access the lists that will be used - disable them */
2508 unsigned state = GET_THREAD_STATE(current);
2509 2846
2510 if (core == new_core) 2847 if (core == new_core)
2511 { 2848 {
2512 /* No change - just unlock everything and return same core */ 2849 /* No change - just return same core */
2513 UNLOCK_THREAD(current, state);
2514 return core; 2850 return core;
2515 } 2851 }
2516 2852
2853 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
2854 LOCK_THREAD(current);
2855
2856 if (current->name == THREAD_DESTRUCT)
2857 {
2858 /* Thread being killed - deactivate and let process complete */
2859 UNLOCK_THREAD(current);
2860 set_irq_level(oldlevel);
2861 thread_wait(current);
2862 /* Should never be reached */
2863 THREAD_PANICF("switch_core->D:*R", current);
2864 }
2865
2517 /* Get us off the running list for the current core */ 2866 /* Get us off the running list for the current core */
2867 RTR_LOCK(core);
2518 remove_from_list_l(&cores[core].running, current); 2868 remove_from_list_l(&cores[core].running, current);
2869 rtr_subtract_entry(core, current->priority);
2870 RTR_UNLOCK(core);
2519 2871
2520 /* Stash return value (old core) in a safe place */ 2872 /* Stash return value (old core) in a safe place */
2521 current->retval = core; 2873 current->retval = core;
@@ -2532,39 +2884,31 @@ unsigned int switch_core(unsigned int new_core)
2532 2884
2533 /* Do not use core_schedule_wakeup here since this will result in 2885 /* Do not use core_schedule_wakeup here since this will result in
2534 * the thread starting to run on the other core before being finished on 2886 * the thread starting to run on the other core before being finished on
2535 * this one. Delay the wakeup list unlock to keep the other core stuck 2887 * this one. Delay the list unlock to keep the other core stuck
2536 * until this thread is ready. */ 2888 * until this thread is ready. */
2537 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 2889 RTR_LOCK(new_core);
2538 w = LOCK_LIST(&cores[new_core].waking); 2890
2539 ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current); 2891 rtr_add_entry(new_core, current->priority);
2892 add_to_list_l(&cores[new_core].running, current);
2540 2893
2541 /* Make a callback into device-specific code, unlock the wakeup list so 2894 /* Make a callback into device-specific code, unlock the wakeup list so
2542 * that execution may resume on the new core, unlock our slot and finally 2895 * that execution may resume on the new core, unlock our slot and finally
2543 * restore the interrupt level */ 2896 * restore the interrupt level */
2544 cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT | 2897 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2545 TBOP_UNLOCK_LIST; 2898 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2546 cores[core].blk_ops.list_p = &cores[new_core].waking; 2899 cores[core].block_task = current;
2547#if CONFIG_CORELOCK == CORELOCK_SWAP 2900
2548 cores[core].blk_ops.state = STATE_RUNNING; 2901 UNLOCK_THREAD(current);
2549 cores[core].blk_ops.list_v = w; 2902
2550#endif 2903 /* Alert other core to activity */
2904 core_wake(new_core);
2551 2905
2552#ifdef HAVE_PRIORITY_SCHEDULING
2553 current->priority_x = HIGHEST_PRIORITY;
2554 cores[core].highest_priority = LOWEST_PRIORITY;
2555#endif
2556 /* Do the stack switching, cache_maintenence and switch_thread call - 2906 /* Do the stack switching, cache_maintenence and switch_thread call -
2557 requires native code */ 2907 requires native code */
2558 switch_thread_core(core, current); 2908 switch_thread_core(core, current);
2559 2909
2560#ifdef HAVE_PRIORITY_SCHEDULING
2561 current->priority_x = LOWEST_PRIORITY;
2562 cores[current->core].highest_priority = LOWEST_PRIORITY;
2563#endif
2564
2565 /* Finally return the old core to caller */ 2910 /* Finally return the old core to caller */
2566 return current->retval; 2911 return current->retval;
2567 (void)state;
2568} 2912}
2569#endif /* NUM_CORES > 1 */ 2913#endif /* NUM_CORES > 1 */
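
A minimal sketch of hopping to the other core and back on dual-core targets; run_on_cop_briefly is hypothetical and the COP core id constant is assumed to be available alongside CPU.

#if NUM_CORES > 1
static void run_on_cop_briefly(void) /* hypothetical helper */
{
    unsigned int old_core = switch_core(COP); /* returns the core we left */

    /* ... work that must execute on the coprocessor ... */

    switch_core(old_core); /* migrate back to the original core */
}
#endif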
2570 2914
@@ -2578,12 +2922,11 @@ void init_threads(void)
2578{ 2922{
2579 const unsigned int core = CURRENT_CORE; 2923 const unsigned int core = CURRENT_CORE;
2580 struct thread_entry *thread; 2924 struct thread_entry *thread;
2581 int slot;
2582 2925
2583 /* CPU will initialize first and then sleep */ 2926 /* CPU will initialize first and then sleep */
2584 slot = find_empty_thread_slot(); 2927 thread = find_empty_thread_slot();
2585 2928
2586 if (slot >= MAXTHREADS) 2929 if (thread == NULL)
2587 { 2930 {
2588 /* WTF? There really must be a slot available at this stage. 2931 /* WTF? There really must be a slot available at this stage.
2589 * This can fail if, for example, .bss isn't zero'ed out by the loader 2932 * This can fail if, for example, .bss isn't zero'ed out by the loader
@@ -2592,33 +2935,29 @@ void init_threads(void)
2592 } 2935 }
2593 2936
2594 /* Initialize initially non-zero members of core */ 2937 /* Initialize initially non-zero members of core */
2595 thread_queue_init(&cores[core].waking);
2596 cores[core].next_tmo_check = current_tick; /* Something not in the past */ 2938 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2597#ifdef HAVE_PRIORITY_SCHEDULING
2598 cores[core].highest_priority = LOWEST_PRIORITY;
2599#endif
2600 2939
2601 /* Initialize initially non-zero members of slot */ 2940 /* Initialize initially non-zero members of slot */
2602 thread = &threads[slot]; 2941 UNLOCK_THREAD(thread); /* No sync worries yet */
2603 thread->name = main_thread_name; 2942 thread->name = main_thread_name;
2604 UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */ 2943 thread->state = STATE_RUNNING;
2605#if NUM_CORES > 1 2944 IF_COP( thread->core = core; )
2606 thread->core = core;
2607#endif
2608#ifdef HAVE_PRIORITY_SCHEDULING 2945#ifdef HAVE_PRIORITY_SCHEDULING
2946 corelock_init(&cores[core].rtr_cl);
2947 thread->base_priority = PRIORITY_USER_INTERFACE;
2948 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2609 thread->priority = PRIORITY_USER_INTERFACE; 2949 thread->priority = PRIORITY_USER_INTERFACE;
2610 thread->priority_x = LOWEST_PRIORITY; 2950 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2611#endif
2612#if CONFIG_CORELOCK == SW_CORELOCK
2613 corelock_init(&thread->cl);
2614#endif 2951#endif
2952 corelock_init(&thread->waiter_cl);
2953 corelock_init(&thread->slot_cl);
2615 2954
2616 add_to_list_l(&cores[core].running, thread); 2955 add_to_list_l(&cores[core].running, thread);
2617 2956
2618 if (core == CPU) 2957 if (core == CPU)
2619 { 2958 {
2620 thread->stack = stackbegin; 2959 thread->stack = stackbegin;
2621 thread->stack_size = (int)stackend - (int)stackbegin; 2960 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2622#if NUM_CORES > 1 /* This code path will not be run on single core targets */ 2961#if NUM_CORES > 1 /* This code path will not be run on single core targets */
2623 /* TODO: HAL interface for this */ 2962 /* TODO: HAL interface for this */
2624 /* Wake up coprocessor and let it initialize kernel and threads */ 2963 /* Wake up coprocessor and let it initialize kernel and threads */
@@ -2638,22 +2977,21 @@ void init_threads(void)
2638 /* Get COP safely primed inside switch_thread where it will remain 2977 /* Get COP safely primed inside switch_thread where it will remain
2639 * until a thread actually exists on it */ 2978 * until a thread actually exists on it */
2640 CPU_CTL = PROC_WAKE; 2979 CPU_CTL = PROC_WAKE;
2641 remove_thread(NULL); 2980 thread_exit();
2642#endif /* NUM_CORES */ 2981#endif /* NUM_CORES */
2643 } 2982 }
2644} 2983}
2645 2984
2646/*--------------------------------------------------------------------------- 2985/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2647 * Returns the maximum percentage of stack a thread ever used while running. 2986#if NUM_CORES == 1
2648 * NOTE: Some large buffer allocations that don't use enough the buffer to 2987static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2649 * overwrite stackptr[0] will not be seen. 2988#else
2650 *--------------------------------------------------------------------------- 2989static int stack_usage(uintptr_t *stackptr, size_t stack_size)
2651 */ 2990#endif
2652int thread_stack_usage(const struct thread_entry *thread)
2653{ 2991{
2654 unsigned int *stackptr = thread->stack; 2992 unsigned int stack_words = stack_size / sizeof (uintptr_t);
2655 int stack_words = thread->stack_size / sizeof (int); 2993 unsigned int i;
2656 int i, usage = 0; 2994 int usage = 0;
2657 2995
2658 for (i = 0; i < stack_words; i++) 2996 for (i = 0; i < stack_words; i++)
2659 { 2997 {
@@ -2667,6 +3005,17 @@ int thread_stack_usage(const struct thread_entry *thread)
2667 return usage; 3005 return usage;
2668} 3006}
2669 3007
3008/*---------------------------------------------------------------------------
3009 * Returns the maximum percentage of stack a thread ever used while running.
3010 * NOTE: Some large buffer allocations that don't use enough of the buffer to
3011 * overwrite stackptr[0] will not be seen.
3012 *---------------------------------------------------------------------------
3013 */
3014int thread_stack_usage(const struct thread_entry *thread)
3015{
3016 return stack_usage(thread->stack, thread->stack_size);
3017}
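
A minimal sketch of reporting the watermark, e.g. from a debug screen; print_stack_usage is hypothetical and the DEBUGF macro from debug.h is assumed.

static void print_stack_usage(const struct thread_entry *t) /* hypothetical */
{
    /* Percentage of the stack ever touched, judged by the DEADBEEF fill */
    int pct = thread_stack_usage(t);
    DEBUGF("%s: %d%% stack used\n", t->name ? t->name : "(null)", pct);
}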
3018
2670#if NUM_CORES > 1 3019#if NUM_CORES > 1
2671/*--------------------------------------------------------------------------- 3020/*---------------------------------------------------------------------------
2672 * Returns the maximum percentage of the core's idle stack ever used during 3021 * Returns the maximum percentage of the core's idle stack ever used during
@@ -2675,19 +3024,7 @@ int thread_stack_usage(const struct thread_entry *thread)
2675 */ 3024 */
2676int idle_stack_usage(unsigned int core) 3025int idle_stack_usage(unsigned int core)
2677{ 3026{
2678 unsigned int *stackptr = idle_stacks[core]; 3027 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
2679 int i, usage = 0;
2680
2681 for (i = 0; i < IDLE_STACK_WORDS; i++)
2682 {
2683 if (stackptr[i] != DEADBEEF)
2684 {
2685 usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
2686 break;
2687 }
2688 }
2689
2690 return usage;
2691} 3028}
2692#endif 3029#endif
2693 3030
diff --git a/uisimulator/sdl/SOURCES b/uisimulator/sdl/SOURCES
index 7971c57163..1d5b498248 100644
--- a/uisimulator/sdl/SOURCES
+++ b/uisimulator/sdl/SOURCES
@@ -1,5 +1,5 @@
1button.c 1button.c
2kernel.c 2kernel-sdl.c
3#ifdef HAVE_LCD_BITMAP 3#ifdef HAVE_LCD_BITMAP
4lcd-bitmap.c 4lcd-bitmap.c
5#elif defined(HAVE_LCD_CHARCELLS) 5#elif defined(HAVE_LCD_CHARCELLS)
diff --git a/uisimulator/sdl/kernel-sdl.c b/uisimulator/sdl/kernel-sdl.c
new file mode 100644
index 0000000000..b6e6a34551
--- /dev/null
+++ b/uisimulator/sdl/kernel-sdl.c
@@ -0,0 +1,168 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Felix Arends
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19
20#include <stdlib.h>
21#include <SDL.h>
22#include <SDL_thread.h>
23#include "memory.h"
24#include "system-sdl.h"
25#include "uisdl.h"
26#include "kernel.h"
27#include "thread-sdl.h"
28#include "thread.h"
29#include "debug.h"
30
31static SDL_TimerID tick_timer_id;
32long start_tick;
33
34/* Condition to signal that "interrupts" may proceed */
35static SDL_cond *sim_thread_cond;
36/* Mutex to serialize changing levels and exclude other threads while
37 * inside a handler */
38static SDL_mutex *sim_irq_mtx;
39static int interrupt_level = HIGHEST_IRQ_LEVEL;
40static int handlers_pending = 0;
41static int status_reg = 0;
42
43extern void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
44
45/* Necessary logic:
46 * 1) All threads must pass unblocked
47 * 2) Current handler must always pass unblocked
48 * 3) Threads must be excluded when irq routine is running
49 * 4) No more than one handler routine should execute at a time
50 */
51int set_irq_level(int level)
52{
53 SDL_LockMutex(sim_irq_mtx);
54
55 int oldlevel = interrupt_level;
56
57 if (status_reg == 0 && level == 0 && oldlevel != 0)
58 {
59 /* Not in a handler and "interrupts" are being reenabled */
60 if (handlers_pending > 0)
61 SDL_CondSignal(sim_thread_cond);
62 }
63
64 interrupt_level = level; /* save new level */
65
66 SDL_UnlockMutex(sim_irq_mtx);
67 return oldlevel;
68}
69
70void sim_enter_irq_handler(void)
71{
72 SDL_LockMutex(sim_irq_mtx);
73 handlers_pending++;
74
75 if(interrupt_level != 0)
76 {
77 /* "Interrupts" are disabled. Wait for reenable */
78 SDL_CondWait(sim_thread_cond, sim_irq_mtx);
79 }
80
81 status_reg = 1;
82}
83
84void sim_exit_irq_handler(void)
85{
86 if (--handlers_pending > 0)
87 SDL_CondSignal(sim_thread_cond);
88
89 status_reg = 0;
90 SDL_UnlockMutex(sim_irq_mtx);
91}
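
A minimal sketch of how any simulated interrupt source is expected to bracket its handler with these calls, exactly as tick_timer() below does; sim_deliver_fake_irq is hypothetical.

static void sim_deliver_fake_irq(void) /* hypothetical helper */
{
    sim_enter_irq_handler();  /* waits while target code has "interrupts" masked */

    /* ... run the target-side interrupt handler here ... */

    sim_exit_irq_handler();   /* let other pending handlers and threads proceed */
}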
92
93bool sim_kernel_init(void)
94{
95 sim_irq_mtx = SDL_CreateMutex();
96 if (sim_irq_mtx == NULL)
97 {
98        fprintf(stderr, "Cannot create sim_irq_mtx\n");
99 return false;
100 }
101
102 sim_thread_cond = SDL_CreateCond();
103 if (sim_thread_cond == NULL)
104 {
105 fprintf(stderr, "Cannot create sim_thread_cond\n");
106 return false;
107 }
108
109 return true;
110}
111
112void sim_kernel_shutdown(void)
113{
114 SDL_RemoveTimer(tick_timer_id);
115 SDL_DestroyMutex(sim_irq_mtx);
116 SDL_DestroyCond(sim_thread_cond);
117}
118
119Uint32 tick_timer(Uint32 interval, void *param)
120{
121 long new_tick;
122
123 (void) interval;
124 (void) param;
125
126 new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
127
128 if(new_tick != current_tick)
129 {
130 long t;
131 for(t = new_tick - current_tick; t > 0; t--)
132 {
133 int i;
134
135 sim_enter_irq_handler();
136
137 /* Run through the list of tick tasks */
138 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
139 {
140 if(tick_funcs[i])
141 {
142 tick_funcs[i]();
143 }
144 }
145
146 sim_exit_irq_handler();
147 }
148
149 current_tick = new_tick;
150 }
151
152 return 1;
153}
154
155void tick_start(unsigned int interval_in_ms)
156{
157 if (tick_timer_id != NULL)
158 {
159 SDL_RemoveTimer(tick_timer_id);
160 tick_timer_id = NULL;
161 }
162 else
163 {
164 start_tick = SDL_GetTicks();
165 }
166
167 tick_timer_id = SDL_AddTimer(interval_in_ms, tick_timer, NULL);
168}
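
A minimal sketch of arming the simulated tick from kernel startup, converting HZ into the millisecond interval tick_start() expects; example_arm_tick is a hypothetical wrapper.

static void example_arm_tick(void)
{
    tick_start(1000 / HZ); /* drive tick_funcs[] via tick_timer() each kernel tick */
}
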
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
deleted file mode 100644
index d67fb2b9f1..0000000000
--- a/uisimulator/sdl/kernel.c
+++ /dev/null
@@ -1,739 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Felix Arends
11 *
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
14 *
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
17 *
18 ****************************************************************************/
19
20#include <stdlib.h>
21#include <SDL.h>
22#include <SDL_thread.h>
23#include "memory.h"
24#include "system-sdl.h"
25#include "uisdl.h"
26#include "kernel.h"
27#include "thread-sdl.h"
28#include "thread.h"
29#include "debug.h"
30
31/* Condition to signal that "interrupts" may proceed */
32static SDL_cond *sim_thread_cond;
33/* Mutex to serialize changing levels and exclude other threads while
34 * inside a handler */
35static SDL_mutex *sim_irq_mtx;
36static int interrupt_level = HIGHEST_IRQ_LEVEL;
37static int handlers_pending = 0;
38static int status_reg = 0;
39
40extern struct core_entry cores[NUM_CORES];
41
42/* Nescessary logic:
43 * 1) All threads must pass unblocked
44 * 2) Current handler must always pass unblocked
45 * 3) Threads must be excluded when irq routine is running
46 * 4) No more than one handler routine should execute at a time
47 */
48int set_irq_level(int level)
49{
50 SDL_LockMutex(sim_irq_mtx);
51
52 int oldlevel = interrupt_level;
53
54 if (status_reg == 0 && level == 0 && oldlevel != 0)
55 {
56 /* Not in a handler and "interrupts" are being reenabled */
57 if (handlers_pending > 0)
58 SDL_CondSignal(sim_thread_cond);
59 }
60
61 interrupt_level = level; /* save new level */
62
63 SDL_UnlockMutex(sim_irq_mtx);
64 return oldlevel;
65}
66
67void sim_enter_irq_handler(void)
68{
69 SDL_LockMutex(sim_irq_mtx);
70 handlers_pending++;
71
72 if(interrupt_level != 0)
73 {
74 /* "Interrupts" are disabled. Wait for reenable */
75 SDL_CondWait(sim_thread_cond, sim_irq_mtx);
76 }
77
78 status_reg = 1;
79}
80
81void sim_exit_irq_handler(void)
82{
83 if (--handlers_pending > 0)
84 SDL_CondSignal(sim_thread_cond);
85
86 status_reg = 0;
87 SDL_UnlockMutex(sim_irq_mtx);
88}
89
90bool sim_kernel_init(void)
91{
92 sim_irq_mtx = SDL_CreateMutex();
93 if (sim_irq_mtx == NULL)
94 {
95 fprintf(stderr, "Cannot create sim_handler_mtx\n");
96 return false;
97 }
98
99 sim_thread_cond = SDL_CreateCond();
100 if (sim_thread_cond == NULL)
101 {
102 fprintf(stderr, "Cannot create sim_thread_cond\n");
103 return false;
104 }
105
106 return true;
107}
108
109void sim_kernel_shutdown(void)
110{
111 SDL_DestroyMutex(sim_irq_mtx);
112 SDL_DestroyCond(sim_thread_cond);
113}
114
115volatile long current_tick = 0;
116static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
117
118/* This array holds all queues that are initiated. It is used for broadcast. */
119static struct event_queue *all_queues[MAX_NUM_QUEUES];
120static int num_queues = 0;
121
122#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
123/* Moves waiting thread's descriptor to the current sender when a
124 message is dequeued */
125static void queue_fetch_sender(struct queue_sender_list *send,
126 unsigned int i)
127{
128 struct thread_entry **spp = &send->senders[i];
129
130 if(*spp)
131 {
132 send->curr_sender = *spp;
133 *spp = NULL;
134 }
135}
136
137/* Puts the specified return value in the waiting thread's return value
138 and wakes the thread - a sender should be confirmed to exist first */
139static void queue_release_sender(struct thread_entry **sender,
140 intptr_t retval)
141{
142 (*sender)->retval = retval;
143 wakeup_thread_no_listlock(sender);
144 if(*sender != NULL)
145 {
146 fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
147 exit(-1);
148 }
149}
150
151/* Releases any waiting threads that are queued with queue_send -
152 reply with NULL */
153static void queue_release_all_senders(struct event_queue *q)
154{
155 if(q->send)
156 {
157 unsigned int i;
158 for(i = q->read; i != q->write; i++)
159 {
160 struct thread_entry **spp =
161 &q->send->senders[i & QUEUE_LENGTH_MASK];
162 if(*spp)
163 {
164 queue_release_sender(spp, 0);
165 }
166 }
167 }
168}
169
170/* Enables queue_send on the specified queue - caller allocates the extra
171 data structure */
172void queue_enable_queue_send(struct event_queue *q,
173 struct queue_sender_list *send)
174{
175 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
176 q->send = NULL;
177 if(send)
178 {
179 q->send = send;
180 memset(send, 0, sizeof(*send));
181 }
182 set_irq_level(oldlevel);
183}
184#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
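/* Usage sketch (illustrative; the queue and sender-list names are
 * hypothetical): a thread that wants synchronous messaging registers a
 * caller-allocated sender list right after initializing its queue:
 *
 *     static struct event_queue my_queue;
 *     static struct queue_sender_list my_queue_senders;
 *
 *     queue_init(&my_queue, true);
 *     queue_enable_queue_send(&my_queue, &my_queue_senders);
 */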
185
186void queue_init(struct event_queue *q, bool register_queue)
187{
188 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
189
190 q->read = 0;
191 q->write = 0;
192 thread_queue_init(&q->queue);
193#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
194 q->send = NULL; /* No message sending by default */
195#endif
196
197 if(register_queue)
198 {
199 if(num_queues >= MAX_NUM_QUEUES)
200 {
201 fprintf(stderr, "queue_init->out of queues");
202 exit(-1);
203 }
204 /* Add it to the all_queues array */
205 all_queues[num_queues++] = q;
206 }
207
208 set_irq_level(oldlevel);
209}
210
211void queue_delete(struct event_queue *q)
212{
213 int i;
214 bool found = false;
215
216 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
217
218 /* Find the queue to be deleted */
219 for(i = 0;i < num_queues;i++)
220 {
221 if(all_queues[i] == q)
222 {
223 found = true;
224 break;
225 }
226 }
227
228 if(found)
229 {
230 /* Move the following queues up in the list */
231 for(;i < num_queues-1;i++)
232 {
233 all_queues[i] = all_queues[i+1];
234 }
235
236 num_queues--;
237 }
238
239 /* Release threads waiting on queue head */
240 thread_queue_wake(&q->queue);
241
242#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
243 /* Release waiting threads and reply to any dequeued message
244 waiting for one. */
245 queue_release_all_senders(q);
246 queue_reply(q, 0);
247#endif
248
249 q->read = 0;
250 q->write = 0;
251
252 set_irq_level(oldlevel);
253}
254
255void queue_wait(struct event_queue *q, struct queue_event *ev)
256{
257 unsigned int rd;
258 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
259
260#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
261 if (q->send && q->send->curr_sender)
262 {
263 /* auto-reply */
264 queue_release_sender(&q->send->curr_sender, 0);
265 }
266#endif
267
268 if (q->read == q->write)
269 {
270 do
271 {
272 block_thread(&q->queue);
273 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
274 }
275 while (q->read == q->write);
276 }
277
278 rd = q->read++ & QUEUE_LENGTH_MASK;
279 *ev = q->events[rd];
280
281#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
282 if(q->send && q->send->senders[rd])
283 {
284 /* Get data for a waiting thread if one exists */
285 queue_fetch_sender(q->send, rd);
286 }
287#endif
288
289 set_irq_level(oldlevel);
290}
291
292void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
293{
294 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
295
296#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
297 if (q->send && q->send->curr_sender)
298 {
299 /* auto-reply */
300 queue_release_sender(&q->send->curr_sender, 0);
301 }
302#endif
303
304 if (q->read == q->write && ticks > 0)
305 {
306 block_thread_w_tmo(&q->queue, ticks);
307 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
308 }
309
310 if(q->read != q->write)
311 {
312 unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
313 *ev = q->events[rd];
314
315#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
316 if(q->send && q->send->senders[rd])
317 {
318 /* Get data for a waiting thread if one exists */
319 queue_fetch_sender(q->send, rd);
320 }
321#endif
322 }
323 else
324 {
325 ev->id = SYS_TIMEOUT;
326 }
327
328 set_irq_level(oldlevel);
329}
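/* Usage sketch (illustrative; SYS_MY_EVENT and my_queue are hypothetical):
 * a typical thread loop waits with a timeout and treats SYS_TIMEOUT as the
 * cue for periodic work:
 *
 *     struct queue_event ev;
 *     queue_wait_w_tmo(&my_queue, &ev, HZ/2);
 *     switch(ev.id)
 *     {
 *     case SYS_TIMEOUT:
 *         // do periodic housekeeping
 *         break;
 *     case SYS_MY_EVENT:
 *         // handle the message; ev.data carries the payload
 *         break;
 *     }
 */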
330
331void queue_post(struct event_queue *q, long id, intptr_t data)
332{
333 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
334
335 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
336
337 q->events[wr].id = id;
338 q->events[wr].data = data;
339
340#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
341 if(q->send)
342 {
343 struct thread_entry **spp = &q->send->senders[wr];
344
345 if(*spp)
346 {
347 /* overflow protect - unblock any thread waiting at this index */
348 queue_release_sender(spp, 0);
349 }
350 }
351#endif
352
353 wakeup_thread(&q->queue);
354
355 set_irq_level(oldlevel);
356}
357
358#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
359intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
360{
 361 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
362
363 unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
364
365 q->events[wr].id = id;
366 q->events[wr].data = data;
367
368 if(q->send)
369 {
370 struct thread_entry **spp = &q->send->senders[wr];
371
372 if(*spp)
373 {
374 /* overflow protect - unblock any thread waiting at this index */
375 queue_release_sender(spp, 0);
376 }
377
378 wakeup_thread(&q->queue);
379
380 block_thread_no_listlock(spp);
381 return thread_get_current()->retval;
382 }
383
384 /* Functions as queue_post if sending is not enabled */
385 wakeup_thread(&q->queue);
386 set_irq_level(oldlevel);
387 return 0;
388}
389
390#if 0 /* not used now but probably will be later */
391/* Query if the last message dequeued was added by queue_send or not */
392bool queue_in_queue_send(struct event_queue *q)
393{
394 return q->send && q->send->curr_sender;
395}
396#endif
397
398/* Replies with retval to any dequeued message sent with queue_send */
399void queue_reply(struct event_queue *q, intptr_t retval)
400{
401 if(q->send && q->send->curr_sender)
402 {
403 queue_release_sender(&q->send->curr_sender, retval);
404 }
405}
406#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
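/* Usage sketch (illustrative; SYS_MY_EVENT and my_queue are hypothetical):
 * queue_send() blocks the sender until the receiving thread replies, either
 * implicitly through the auto-reply in queue_wait()/queue_wait_w_tmo() or
 * explicitly with queue_reply():
 *
 *     // sender thread
 *     intptr_t result = queue_send(&my_queue, SYS_MY_EVENT, 0);
 *
 *     // receiving thread, after dequeuing SYS_MY_EVENT
 *     queue_reply(&my_queue, 1);
 */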
407
408bool queue_empty(const struct event_queue* q)
409{
410 return ( q->read == q->write );
411}
412
413bool queue_peek(struct event_queue *q, struct queue_event *ev)
414{
415 if (q->read == q->write)
416 return false;
417
418 bool have_msg = false;
419
420 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
421
422 if (q->read != q->write)
423 {
424 *ev = q->events[q->read & QUEUE_LENGTH_MASK];
425 have_msg = true;
426 }
427
428 set_irq_level(oldlevel);
429
430 return have_msg;
431}
432
433void queue_clear(struct event_queue* q)
434{
435 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
436
437 /* fixme: This is potentially unsafe in case we do interrupt-like processing */
438#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
 439 /* Release all threads waiting in the queue for a reply -
 440 a dequeued sent message will be handled by the owning thread */
441 queue_release_all_senders(q);
442#endif
443 q->read = 0;
444 q->write = 0;
445
446 set_irq_level(oldlevel);
447}
448
449void queue_remove_from_head(struct event_queue *q, long id)
450{
451 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
452
453 while(q->read != q->write)
454 {
455 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
456
457 if(q->events[rd].id != id)
458 {
459 break;
460 }
461
462#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
463 if(q->send)
464 {
465 struct thread_entry **spp = &q->send->senders[rd];
466
467 if(*spp)
468 {
469 /* Release any thread waiting on this message */
470 queue_release_sender(spp, 0);
471 }
472 }
473#endif
474 q->read++;
475 }
476
477 set_irq_level(oldlevel);
478}
479
480int queue_count(const struct event_queue *q)
481{
482 return q->write - q->read;
483}
484
485int queue_broadcast(long id, intptr_t data)
486{
487 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
488 int i;
489
490 for(i = 0;i < num_queues;i++)
491 {
492 queue_post(all_queues[i], id, data);
493 }
494
495 set_irq_level(oldlevel);
496 return num_queues;
497}
498
499void yield(void)
500{
501 switch_thread(NULL);
502}
503
504void sleep(int ticks)
505{
506 sleep_thread(ticks);
507}
508
509void sim_tick_tasks(void)
510{
511 int i;
512
513 /* Run through the list of tick tasks */
514 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
515 {
516 if(tick_funcs[i])
517 {
518 tick_funcs[i]();
519 }
520 }
521}
522
523int tick_add_task(void (*f)(void))
524{
525 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
526 int i;
527
528 /* Add a task if there is room */
529 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
530 {
531 if(tick_funcs[i] == NULL)
532 {
533 tick_funcs[i] = f;
534 set_irq_level(oldlevel);
535 return 0;
536 }
537 }
538 fprintf(stderr, "Error! tick_add_task(): out of tasks");
539 exit(-1);
540 return -1;
541}
542
543int tick_remove_task(void (*f)(void))
544{
545 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
546 int i;
547
548 /* Remove a task if it is there */
549 for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
550 {
551 if(tick_funcs[i] == f)
552 {
553 tick_funcs[i] = NULL;
554 set_irq_level(oldlevel);
555 return 0;
556 }
557 }
558
559 set_irq_level(oldlevel);
560 return -1;
561}
562
563/* Very simple mutex simulation - won't work with pre-emptive
564 multitasking, but is better than nothing at all */
565void mutex_init(struct mutex *m)
566{
567 m->queue = NULL;
568 m->thread = NULL;
569 m->count = 0;
570 m->locked = 0;
571}
572
573void mutex_lock(struct mutex *m)
574{
575 struct thread_entry *const thread = thread_get_current();
576
577 if(thread == m->thread)
578 {
579 m->count++;
580 return;
581 }
582
583 if (!test_and_set(&m->locked, 1))
584 {
585 m->thread = thread;
586 return;
587 }
588
589 block_thread_no_listlock(&m->queue);
590}
591
592void mutex_unlock(struct mutex *m)
593{
594 /* unlocker not being the owner is an unlocking violation */
595 if(m->thread != thread_get_current())
596 {
597 fprintf(stderr, "mutex_unlock->wrong thread");
598 exit(-1);
599 }
600
601 if (m->count > 0)
602 {
603 /* this thread still owns lock */
604 m->count--;
605 return;
606 }
607
608 m->thread = wakeup_thread_no_listlock(&m->queue);
609
610 if (m->thread == NULL)
611 {
612 /* release lock */
613 m->locked = 0;
614 }
615}
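/* Usage sketch (illustrative; my_mutex is hypothetical): the mutex is
 * recursive for its owner, so nested lock/unlock pairs are fine as long as
 * they balance:
 *
 *     mutex_lock(&my_mutex);
 *     ...critical section...
 *     mutex_unlock(&my_mutex);
 */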
616
617#ifdef HAVE_SEMAPHORE_OBJECTS
618void semaphore_init(struct semaphore *s, int max, int start)
619{
620 if(max <= 0 || start < 0 || start > max)
621 {
622 fprintf(stderr, "semaphore_init->inv arg");
623 exit(-1);
624 }
625 s->queue = NULL;
626 s->max = max;
627 s->count = start;
628}
629
630void semaphore_wait(struct semaphore *s)
631{
632 if(--s->count >= 0)
633 return;
634 block_thread_no_listlock(&s->queue);
635}
636
637void semaphore_release(struct semaphore *s)
638{
639 if(s->count < s->max)
640 {
641 if(++s->count <= 0)
642 {
643 if(s->queue == NULL)
644 {
645 /* there should be threads in this queue */
646 fprintf(stderr, "semaphore->wakeup");
647 exit(-1);
648 }
649 /* a thread was queued - wake it up */
650 wakeup_thread_no_listlock(&s->queue);
651 }
652 }
653}
654#endif /* HAVE_SEMAPHORE_OBJECTS */
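/* Usage sketch (illustrative; my_sem is hypothetical): a semaphore created
 * with no initial count lets one thread block until another signals
 * completion:
 *
 *     semaphore_init(&my_sem, 1, 0);  // one slot, initially empty
 *
 *     semaphore_wait(&my_sem);        // consumer blocks here
 *     semaphore_release(&my_sem);     // producer wakes the consumer
 */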
655
656#ifdef HAVE_EVENT_OBJECTS
657void event_init(struct event *e, unsigned int flags)
658{
659 e->queues[STATE_NONSIGNALED] = NULL;
660 e->queues[STATE_SIGNALED] = NULL;
661 e->state = flags & STATE_SIGNALED;
662 e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
663}
664
665void event_wait(struct event *e, unsigned int for_state)
666{
667 unsigned int last_state = e->state;
668
669 if(e->automatic != 0)
670 {
 671 /* a wait for nonsignaled is always satisfied for an automatic event;
 672 a wait for signaled is satisfied if the event is currently signaled */
673 if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
674 {
675 /* automatic - unsignal */
676 e->state = STATE_NONSIGNALED;
677 return;
678 }
679 /* block until state matches */
680 }
681 else if(for_state == last_state)
682 {
683 /* the state being waited for is the current state */
684 return;
685 }
686
687 /* current state does not match wait-for state */
688 block_thread_no_listlock(&e->queues[for_state]);
689}
690
691void event_set_state(struct event *e, unsigned int state)
692{
693 unsigned int last_state = e->state;
694
695 if(last_state == state)
696 {
697 /* no change */
698 return;
699 }
700
701 if(state == STATE_SIGNALED)
702 {
703 if(e->automatic != 0)
704 {
705 struct thread_entry *thread;
706
707 if(e->queues[STATE_NONSIGNALED] != NULL)
708 {
709 /* no thread should have ever blocked for nonsignaled */
710 fprintf(stderr, "set_event_state->queue[NS]:S");
711 exit(-1);
712 }
713
714 /* pass to next thread and keep unsignaled - "pulse" */
715 thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
716 e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
717 }
718 else
719 {
720 /* release all threads waiting for signaled */
721 thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
722 e->state = STATE_SIGNALED;
723 }
724 }
725 else
726 {
727 /* release all threads waiting for unsignaled */
728 if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
729 {
730 /* no thread should have ever blocked */
731 fprintf(stderr, "set_event_state->queue[NS]:NS");
732 exit(-1);
733 }
734
735 thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
736 e->state = STATE_NONSIGNALED;
737 }
738}
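/* Usage sketch (illustrative; my_event is hypothetical): an automatic event
 * behaves as a pulse - each signal releases at most one waiter and the event
 * reverts to nonsignaled - while a manual event stays signaled and releases
 * every waiter:
 *
 *     event_init(&my_event, EVENT_AUTOMATIC);      // assumed to start nonsignaled
 *     event_wait(&my_event, STATE_SIGNALED);       // consumer blocks
 *     event_set_state(&my_event, STATE_SIGNALED);  // producer wakes one consumer
 */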
739#endif /* HAVE_EVENT_OBJECTS */
diff --git a/uisimulator/sdl/system-sdl.h b/uisimulator/sdl/system-sdl.h
index 2197a014c3..c5e7d40560 100644
--- a/uisimulator/sdl/system-sdl.h
+++ b/uisimulator/sdl/system-sdl.h
@@ -29,4 +29,6 @@ void sim_exit_irq_handler(void);
29bool sim_kernel_init(void); 29bool sim_kernel_init(void);
30void sim_kernel_shutdown(void); 30void sim_kernel_shutdown(void);
31 31
32extern long start_tick;
33
32#endif /* _SYSTEM_SDL_H_ */ 34#endif /* _SYSTEM_SDL_H_ */
diff --git a/uisimulator/sdl/thread-sdl.c b/uisimulator/sdl/thread-sdl.c
index d1a8e60d01..78a66f72a7 100644
--- a/uisimulator/sdl/thread-sdl.c
+++ b/uisimulator/sdl/thread-sdl.c
@@ -26,6 +26,7 @@
26#include <setjmp.h> 26#include <setjmp.h>
27#include "system-sdl.h" 27#include "system-sdl.h"
28#include "thread-sdl.h" 28#include "thread-sdl.h"
29#include "system.h"
29#include "kernel.h" 30#include "kernel.h"
30#include "thread.h" 31#include "thread.h"
31#include "debug.h" 32#include "debug.h"
@@ -37,7 +38,7 @@
37#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__) 38#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
38static char __name[32]; 39static char __name[32];
39#define THREAD_SDL_GET_NAME(thread) \ 40#define THREAD_SDL_GET_NAME(thread) \
40 ({ thread_get_name(__name, sizeof(__name)/sizeof(__name[0]), thread); __name; }) 41 ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
41#else 42#else
42#define THREAD_SDL_DEBUGF(...) 43#define THREAD_SDL_DEBUGF(...)
43#define THREAD_SDL_GET_NAME(thread) 44#define THREAD_SDL_GET_NAME(thread)
@@ -54,7 +55,6 @@ struct thread_entry threads[MAXTHREADS];
54 * way to get them back in there so they may exit */ 55 * way to get them back in there so they may exit */
55static jmp_buf thread_jmpbufs[MAXTHREADS]; 56static jmp_buf thread_jmpbufs[MAXTHREADS];
56static SDL_mutex *m; 57static SDL_mutex *m;
57static struct thread_entry *running;
58static bool threads_exit = false; 58static bool threads_exit = false;
59 59
60extern long start_tick; 60extern long start_tick;
@@ -78,7 +78,7 @@ void thread_sdl_shutdown(void)
78 { 78 {
79 /* Signal thread on delay or block */ 79 /* Signal thread on delay or block */
80 SDL_Thread *t = thread->context.t; 80 SDL_Thread *t = thread->context.t;
81 SDL_CondSignal(thread->context.c); 81 SDL_SemPost(thread->context.s);
82 SDL_UnlockMutex(m); 82 SDL_UnlockMutex(m);
83 /* Wait for it to finish */ 83 /* Wait for it to finish */
84 SDL_WaitThread(t, NULL); 84 SDL_WaitThread(t, NULL);
@@ -98,7 +98,7 @@ extern void app_main(void *param);
98static int thread_sdl_app_main(void *param) 98static int thread_sdl_app_main(void *param)
99{ 99{
100 SDL_LockMutex(m); 100 SDL_LockMutex(m);
101 running = &threads[0]; 101 cores[CURRENT_CORE].running = &threads[0];
102 102
103 /* Set the jump address for return */ 103 /* Set the jump address for return */
104 if (setjmp(thread_jmpbufs[0]) == 0) 104 if (setjmp(thread_jmpbufs[0]) == 0)
@@ -116,6 +116,8 @@ static int thread_sdl_app_main(void *param)
116/* Initialize SDL threading */ 116/* Initialize SDL threading */
117bool thread_sdl_init(void *param) 117bool thread_sdl_init(void *param)
118{ 118{
119 struct thread_entry *thread;
120 memset(cores, 0, sizeof(cores));
119 memset(threads, 0, sizeof(threads)); 121 memset(threads, 0, sizeof(threads));
120 122
121 m = SDL_CreateMutex(); 123 m = SDL_CreateMutex();
@@ -129,28 +131,30 @@ bool thread_sdl_init(void *param)
129 /* Slot 0 is reserved for the main thread - initialize it here and 131 /* Slot 0 is reserved for the main thread - initialize it here and
130 then create the SDL thread - it is possible to have a quick, early 132 then create the SDL thread - it is possible to have a quick, early
131 shutdown try to access the structure. */ 133 shutdown try to access the structure. */
132 running = &threads[0]; 134 thread = &threads[0];
133 running->stack = " "; 135 thread->stack = (uintptr_t *)" ";
134 running->stack_size = 8; 136 thread->stack_size = 8;
135 running->name = "main"; 137 thread->name = "main";
136 running->state = STATE_RUNNING; 138 thread->state = STATE_RUNNING;
137 running->context.c = SDL_CreateCond(); 139 thread->context.s = SDL_CreateSemaphore(0);
140 cores[CURRENT_CORE].running = thread;
138 141
139 if (running->context.c == NULL) 142 if (thread->context.s == NULL)
140 { 143 {
141 fprintf(stderr, "Failed to create main condition variable\n"); 144 fprintf(stderr, "Failed to create main semaphore\n");
142 return false; 145 return false;
143 } 146 }
144 147
145 running->context.t = SDL_CreateThread(thread_sdl_app_main, param); 148 thread->context.t = SDL_CreateThread(thread_sdl_app_main, param);
146 149
147 if (running->context.t == NULL) 150 if (thread->context.t == NULL)
148 { 151 {
152 SDL_DestroySemaphore(thread->context.s);
149 fprintf(stderr, "Failed to create main thread\n"); 153 fprintf(stderr, "Failed to create main thread\n");
150 return false; 154 return false;
151 } 155 }
152 156
153 THREAD_SDL_DEBUGF("Main thread: %p\n", running); 157 THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
154 158
155 SDL_UnlockMutex(m); 159 SDL_UnlockMutex(m);
156 return true; 160 return true;
@@ -160,21 +164,22 @@ bool thread_sdl_init(void *param)
160void thread_sdl_thread_lock(void *me) 164void thread_sdl_thread_lock(void *me)
161{ 165{
162 SDL_LockMutex(m); 166 SDL_LockMutex(m);
163 running = (struct thread_entry *)me; 167 cores[CURRENT_CORE].running = (struct thread_entry *)me;
164 168
165 if (threads_exit) 169 if (threads_exit)
166 remove_thread(NULL); 170 thread_exit();
167} 171}
168 172
169void * thread_sdl_thread_unlock(void) 173void * thread_sdl_thread_unlock(void)
170{ 174{
171 struct thread_entry *current = running; 175 struct thread_entry *current = cores[CURRENT_CORE].running;
172 SDL_UnlockMutex(m); 176 SDL_UnlockMutex(m);
173 return current; 177 return current;
174} 178}
175 179
176static int find_empty_thread_slot(void) 180static struct thread_entry * find_empty_thread_slot(void)
177{ 181{
182 struct thread_entry *thread = NULL;
178 int n; 183 int n;
179 184
180 for (n = 0; n < MAXTHREADS; n++) 185 for (n = 0; n < MAXTHREADS; n++)
@@ -182,10 +187,13 @@ static int find_empty_thread_slot(void)
182 int state = threads[n].state; 187 int state = threads[n].state;
183 188
184 if (state == STATE_KILLED) 189 if (state == STATE_KILLED)
190 {
191 thread = &threads[n];
185 break; 192 break;
193 }
186 } 194 }
187 195
188 return n; 196 return thread;
189} 197}
190 198
191static void add_to_list_l(struct thread_entry **list, 199static void add_to_list_l(struct thread_entry **list,
@@ -229,64 +237,163 @@ static void remove_from_list_l(struct thread_entry **list,
229 thread->l.next->l.prev = thread->l.prev; 237 thread->l.next->l.prev = thread->l.prev;
230} 238}
231 239
232static inline void run_blocking_ops(void)
233{
234 set_irq_level(0);
235}
236
237struct thread_entry *thread_get_current(void) 240struct thread_entry *thread_get_current(void)
238{ 241{
239 return running; 242 return cores[CURRENT_CORE].running;
240} 243}
241 244
242void switch_thread(struct thread_entry *old) 245void switch_thread(void)
243{ 246{
244 struct thread_entry *current = running; 247 struct thread_entry *current = cores[CURRENT_CORE].running;
245 248
246 SDL_UnlockMutex(m); 249 set_irq_level(0);
247 /* Any other thread waiting already will get it first */
248 SDL_LockMutex(m);
249 running = current;
250 250
251 if (threads_exit) 251 switch (current->state)
252 remove_thread(NULL); 252 {
253 case STATE_RUNNING:
254 {
255 SDL_UnlockMutex(m);
256 /* Any other thread waiting already will get it first */
257 SDL_LockMutex(m);
258 break;
259 } /* STATE_RUNNING: */
260
261 case STATE_BLOCKED:
262 {
263 int oldlevel;
264
265 SDL_UnlockMutex(m);
266 SDL_SemWait(current->context.s);
267 SDL_LockMutex(m);
268
269 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
270 current->state = STATE_RUNNING;
271 set_irq_level(oldlevel);
272 break;
273 } /* STATE_BLOCKED: */
274
275 case STATE_BLOCKED_W_TMO:
276 {
277 int result, oldlevel;
278
279 SDL_UnlockMutex(m);
280 result = SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
281 SDL_LockMutex(m);
282
283 oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
284
285 if (current->state == STATE_BLOCKED_W_TMO)
286 {
287 /* Timed out */
288 remove_from_list_l(current->bqp, current);
289
290#ifdef HAVE_WAKEUP_EXT_CB
291 if (current->wakeup_ext_cb != NULL)
292 current->wakeup_ext_cb(current);
293#endif
294 current->state = STATE_RUNNING;
295 }
253 296
254 (void)old; 297 if (result == SDL_MUTEX_TIMEDOUT)
298 {
299 /* Other signals from an explicit wake could have been made before
300 * arriving here if we timed out waiting for the semaphore. Make
301 * sure the count is reset. */
302 while (SDL_SemValue(current->context.s) > 0)
303 SDL_SemTryWait(current->context.s);
304 }
305
306 set_irq_level(oldlevel);
307 break;
308 } /* STATE_BLOCKED_W_TMO: */
309
310 case STATE_SLEEPING:
311 {
312 SDL_UnlockMutex(m);
313 SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
314 SDL_LockMutex(m);
315 current->state = STATE_RUNNING;
316 break;
317 } /* STATE_SLEEPING: */
318 }
319
320 cores[CURRENT_CORE].running = current;
321
322 if (threads_exit)
323 thread_exit();
255} 324}
256 325
257void sleep_thread(int ticks) 326void sleep_thread(int ticks)
258{ 327{
259 struct thread_entry *current; 328 struct thread_entry *current = cores[CURRENT_CORE].running;
260 int rem; 329 int rem;
261 330
262 current = running;
263 current->state = STATE_SLEEPING; 331 current->state = STATE_SLEEPING;
264 332
265 rem = (SDL_GetTicks() - start_tick) % (1000/HZ); 333 rem = (SDL_GetTicks() - start_tick) % (1000/HZ);
266 if (rem < 0) 334 if (rem < 0)
267 rem = 0; 335 rem = 0;
268 336
269 rem = (1000/HZ) * ticks + ((1000/HZ)-1) - rem; 337 current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
338}
339
340void block_thread(struct thread_entry *current)
341{
342 current->state = STATE_BLOCKED;
343 add_to_list_l(current->bqp, current);
344}
345
346void block_thread_w_tmo(struct thread_entry *current, int ticks)
347{
348 current->state = STATE_BLOCKED_W_TMO;
349 current->tmo_tick = (1000/HZ)*ticks;
350 add_to_list_l(current->bqp, current);
351}
352
353unsigned int wakeup_thread(struct thread_entry **list)
354{
355 struct thread_entry *thread = *list;
270 356
271 if (rem == 0) 357 if (thread != NULL)
272 { 358 {
273 /* Unlock and give up rest of quantum */ 359 switch (thread->state)
274 SDL_UnlockMutex(m); 360 {
275 SDL_Delay(0); 361 case STATE_BLOCKED:
276 SDL_LockMutex(m); 362 case STATE_BLOCKED_W_TMO:
363 remove_from_list_l(list, thread);
364 thread->state = STATE_RUNNING;
365 SDL_SemPost(thread->context.s);
366 return THREAD_OK;
367 }
277 } 368 }
278 else 369
370 return THREAD_NONE;
371}
372
373unsigned int thread_queue_wake(struct thread_entry **list)
374{
375 unsigned int result = THREAD_NONE;
376
377 for (;;)
279 { 378 {
280 /* These sleeps must be signalable for thread exit */ 379 unsigned int rc = wakeup_thread(list);
281 SDL_CondWaitTimeout(current->context.c, m, rem);
282 }
283 380
284 running = current; 381 if (rc == THREAD_NONE)
382 break;
285 383
286 current->state = STATE_RUNNING; 384 result |= rc;
385 }
287 386
288 if (threads_exit) 387 return result;
289 remove_thread(NULL); 388}
389
390void thread_thaw(struct thread_entry *thread)
391{
392 if (thread->state == STATE_FROZEN)
393 {
394 thread->state = STATE_RUNNING;
395 SDL_SemPost(thread->context.s);
396 }
290} 397}
291 398
292int runthread(void *data) 399int runthread(void *data)
@@ -297,9 +404,9 @@ int runthread(void *data)
297 /* Cannot access thread variables before locking the mutex as the 404 /* Cannot access thread variables before locking the mutex as the
298 data structures may not be filled-in yet. */ 405 data structures may not be filled-in yet. */
299 SDL_LockMutex(m); 406 SDL_LockMutex(m);
300 running = (struct thread_entry *)data; 407 cores[CURRENT_CORE].running = (struct thread_entry *)data;
301 current = running; 408 current = cores[CURRENT_CORE].running;
302 current_jmpbuf = &thread_jmpbufs[running - threads]; 409 current_jmpbuf = &thread_jmpbufs[current - threads];
303 410
304 /* Setup jump for exit */ 411 /* Setup jump for exit */
305 if (setjmp(*current_jmpbuf) == 0) 412 if (setjmp(*current_jmpbuf) == 0)
@@ -307,9 +414,10 @@ int runthread(void *data)
307 /* Run the thread routine */ 414 /* Run the thread routine */
308 if (current->state == STATE_FROZEN) 415 if (current->state == STATE_FROZEN)
309 { 416 {
310 SDL_CondWait(current->context.c, m); 417 SDL_UnlockMutex(m);
311 running = current; 418 SDL_SemWait(current->context.s);
312 419 SDL_LockMutex(m);
420 cores[CURRENT_CORE].running = current;
313 } 421 }
314 422
315 if (!threads_exit) 423 if (!threads_exit)
@@ -320,7 +428,7 @@ int runthread(void *data)
320 /* Thread routine returned - suicide */ 428 /* Thread routine returned - suicide */
321 } 429 }
322 430
323 remove_thread(NULL); 431 thread_exit();
324 } 432 }
325 else 433 else
326 { 434 {
@@ -332,131 +440,59 @@ int runthread(void *data)
332} 440}
333 441
334struct thread_entry* 442struct thread_entry*
335 create_thread(void (*function)(void), void* stack, int stack_size, 443 create_thread(void (*function)(void), void* stack, size_t stack_size,
336 unsigned flags, const char *name) 444 unsigned flags, const char *name)
337{ 445{
338 /** Avoid compiler warnings */ 446 struct thread_entry *thread;
339 SDL_Thread* t; 447 SDL_Thread* t;
340 SDL_cond *cond; 448 SDL_sem *s;
341 int slot;
342 449
343 THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : ""); 450 THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");
344 451
345 slot = find_empty_thread_slot(); 452 thread = find_empty_thread_slot();
346 if (slot >= MAXTHREADS) 453 if (thread == NULL)
347 { 454 {
348 DEBUGF("Failed to find thread slot\n"); 455 DEBUGF("Failed to find thread slot\n");
349 return NULL; 456 return NULL;
350 } 457 }
351 458
352 cond = SDL_CreateCond(); 459 s = SDL_CreateSemaphore(0);
353 if (cond == NULL) 460 if (s == NULL)
354 { 461 {
355 DEBUGF("Failed to create condition variable\n"); 462 DEBUGF("Failed to create semaphore\n");
356 return NULL; 463 return NULL;
357 } 464 }
358 465
359 t = SDL_CreateThread(runthread, &threads[slot]); 466 t = SDL_CreateThread(runthread, thread);
360 if (t == NULL) 467 if (t == NULL)
361 { 468 {
362 DEBUGF("Failed to create SDL thread\n"); 469 DEBUGF("Failed to create SDL thread\n");
363 SDL_DestroyCond(cond); 470 SDL_DestroySemaphore(s);
364 return NULL; 471 return NULL;
365 } 472 }
366 473
367 threads[slot].stack = stack; 474 thread->stack = stack;
368 threads[slot].stack_size = stack_size; 475 thread->stack_size = stack_size;
369 threads[slot].name = name; 476 thread->name = name;
370 threads[slot].state = (flags & CREATE_THREAD_FROZEN) ? 477 thread->state = (flags & CREATE_THREAD_FROZEN) ?
371 STATE_FROZEN : STATE_RUNNING; 478 STATE_FROZEN : STATE_RUNNING;
372 threads[slot].context.start = function; 479 thread->context.start = function;
373 threads[slot].context.t = t; 480 thread->context.t = t;
374 threads[slot].context.c = cond; 481 thread->context.s = s;
375 482
376 THREAD_SDL_DEBUGF("New Thread: %d (%s)\n", 483 THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
377 slot, THREAD_SDL_GET_NAME(&threads[slot])); 484 thread - threads, THREAD_SDL_GET_NAME(thread));
378 485
379 return &threads[slot]; 486 return thread;
380}
381
382void _block_thread(struct thread_queue *tq)
383{
384 struct thread_entry *thread = running;
385
386 thread->state = STATE_BLOCKED;
387 thread->bqp = tq;
388 add_to_list_l(&tq->queue, thread);
389
390 run_blocking_ops();
391
392 SDL_CondWait(thread->context.c, m);
393 running = thread;
394
395 if (threads_exit)
396 remove_thread(NULL);
397}
398
399void block_thread_w_tmo(struct thread_queue *tq, int ticks)
400{
401 struct thread_entry *thread = running;
402
403 thread->state = STATE_BLOCKED_W_TMO;
404 thread->bqp = tq;
405 add_to_list_l(&tq->queue, thread);
406
407 run_blocking_ops();
408
409 SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks);
410 running = thread;
411
412 if (thread->state == STATE_BLOCKED_W_TMO)
413 {
414 /* Timed out */
415 remove_from_list_l(&tq->queue, thread);
416 thread->state = STATE_RUNNING;
417 }
418
419 if (threads_exit)
420 remove_thread(NULL);
421}
422
423struct thread_entry * _wakeup_thread(struct thread_queue *tq)
424{
425 struct thread_entry *thread = tq->queue;
426
427 if (thread == NULL)
428 {
429 return NULL;
430 }
431
432 switch (thread->state)
433 {
434 case STATE_BLOCKED:
435 case STATE_BLOCKED_W_TMO:
436 remove_from_list_l(&tq->queue, thread);
437 thread->state = STATE_RUNNING;
438 SDL_CondSignal(thread->context.c);
439 return thread;
440 default:
441 return NULL;
442 }
443}
444
445void thread_thaw(struct thread_entry *thread)
446{
447 if (thread->state == STATE_FROZEN)
448 {
449 thread->state = STATE_RUNNING;
450 SDL_CondSignal(thread->context.c);
451 }
452} 487}
453 488
454void init_threads(void) 489void init_threads(void)
455{ 490{
456 /* Main thread is already initialized */ 491 /* Main thread is already initialized */
457 if (running != &threads[0]) 492 if (cores[CURRENT_CORE].running != &threads[0])
458 { 493 {
459 THREAD_PANICF("Wrong main thread in init_threads: %p\n", running); 494 THREAD_PANICF("Wrong main thread in init_threads: %p\n",
495 cores[CURRENT_CORE].running);
460 } 496 }
461 497
462 THREAD_SDL_DEBUGF("First Thread: %d (%s)\n", 498 THREAD_SDL_DEBUGF("First Thread: %d (%s)\n",
@@ -465,9 +501,9 @@ void init_threads(void)
465 501
466void remove_thread(struct thread_entry *thread) 502void remove_thread(struct thread_entry *thread)
467{ 503{
468 struct thread_entry *current = running; 504 struct thread_entry *current = cores[CURRENT_CORE].running;
469 SDL_Thread *t; 505 SDL_Thread *t;
470 SDL_cond *c; 506 SDL_sem *s;
471 507
472 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); 508 int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
473 509
@@ -477,7 +513,7 @@ void remove_thread(struct thread_entry *thread)
477 } 513 }
478 514
479 t = thread->context.t; 515 t = thread->context.t;
480 c = thread->context.c; 516 s = thread->context.s;
481 thread->context.t = NULL; 517 thread->context.t = NULL;
482 518
483 if (thread != current) 519 if (thread != current)
@@ -487,20 +523,25 @@ void remove_thread(struct thread_entry *thread)
487 case STATE_BLOCKED: 523 case STATE_BLOCKED:
488 case STATE_BLOCKED_W_TMO: 524 case STATE_BLOCKED_W_TMO:
489 /* Remove thread from object it's waiting on */ 525 /* Remove thread from object it's waiting on */
490 remove_from_list_l(&thread->bqp->queue, thread); 526 remove_from_list_l(thread->bqp, thread);
527
528#ifdef HAVE_WAKEUP_EXT_CB
529 if (thread->wakeup_ext_cb != NULL)
530 thread->wakeup_ext_cb(thread);
531#endif
491 break; 532 break;
492 } 533 }
493 534
494 SDL_CondSignal(c); 535 SDL_SemPost(s);
495 } 536 }
496 537
497 THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n", 538 THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
498 thread - threads, THREAD_SDL_GET_NAME(thread)); 539 thread - threads, THREAD_SDL_GET_NAME(thread));
499 540
500 thread_queue_wake_no_listlock(&thread->queue);
501 thread->state = STATE_KILLED; 541 thread->state = STATE_KILLED;
542 thread_queue_wake(&thread->queue);
502 543
503 SDL_DestroyCond(c); 544 SDL_DestroySemaphore(s);
504 545
505 if (thread == current) 546 if (thread == current)
506 { 547 {
@@ -514,14 +555,23 @@ void remove_thread(struct thread_entry *thread)
514 set_irq_level(oldlevel); 555 set_irq_level(oldlevel);
515} 556}
516 557
558void thread_exit(void)
559{
560 remove_thread(NULL);
561}
562
517void thread_wait(struct thread_entry *thread) 563void thread_wait(struct thread_entry *thread)
518{ 564{
565 struct thread_entry *current = cores[CURRENT_CORE].running;
566
519 if (thread == NULL) 567 if (thread == NULL)
520 thread = running; 568 thread = current;
521 569
522 if (thread->state != STATE_KILLED) 570 if (thread->state != STATE_KILLED)
523 { 571 {
524 block_thread_no_listlock(&thread->queue); 572 current->bqp = &thread->queue;
573 block_thread(current);
574 switch_thread();
525 } 575 }
526} 576}
527 577
diff --git a/uisimulator/sdl/uisdl.c b/uisimulator/sdl/uisdl.c
index e0a449ed48..09210926b5 100644
--- a/uisimulator/sdl/uisdl.c
+++ b/uisimulator/sdl/uisdl.c
@@ -40,19 +40,13 @@
40#include "SDL_thread.h" 40#include "SDL_thread.h"
41 41
42/* extern functions */ 42/* extern functions */
43extern void app_main (void *); /* mod entry point */ 43extern void new_key(int key);
44extern void new_key(int key);
45extern void sim_tick_tasks(void);
46extern bool sim_io_init(void);
47extern void sim_io_shutdown(void);
48 44
49void button_event(int key, bool pressed); 45void button_event(int key, bool pressed);
50 46
51SDL_Surface *gui_surface; 47SDL_Surface *gui_surface;
52bool background = false; /* Don't use backgrounds by default */ 48bool background = false; /* Don't use backgrounds by default */
53 49
54SDL_TimerID tick_timer_id;
55
56bool lcd_display_redraw = true; /* Used for player simulator */ 50bool lcd_display_redraw = true; /* Used for player simulator */
57char having_new_lcd = true; /* Used for player simulator */ 51char having_new_lcd = true; /* Used for player simulator */
58bool sim_alarm_wakeup = false; 52bool sim_alarm_wakeup = false;
@@ -63,31 +57,6 @@ bool debug_audio = false;
63bool debug_wps = false; 57bool debug_wps = false;
64int wps_verbose_level = 3; 58int wps_verbose_level = 3;
65 59
66long start_tick;
67
68Uint32 tick_timer(Uint32 interval, void *param)
69{
70 long new_tick;
71
72 (void) interval;
73 (void) param;
74
75 new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
76
77 if (new_tick != current_tick) {
78 long i;
79 for (i = new_tick - current_tick; i > 0; i--)
80 {
81 sim_enter_irq_handler();
82 sim_tick_tasks();
83 sim_exit_irq_handler();
84 }
85 current_tick = new_tick;
86 }
87
88 return 1;
89}
90
91void gui_message_loop(void) 60void gui_message_loop(void)
92{ 61{
93 SDL_Event event; 62 SDL_Event event;
@@ -181,8 +150,6 @@ bool gui_startup(void)
181 SDL_UpdateRect(gui_surface, 0, 0, 0, 0); 150 SDL_UpdateRect(gui_surface, 0, 0, 0, 0);
182 } 151 }
183 152
184 start_tick = SDL_GetTicks();
185
186 return true; 153 return true;
187} 154}
188 155
@@ -191,7 +158,6 @@ bool gui_shutdown(void)
191 /* Order here is relevant to prevent deadlocks and use of destroyed 158 sync primitives by kernel threads */
192 sync primitives by kernel threads */ 159 sync primitives by kernel threads */
193 thread_sdl_shutdown(); 160 thread_sdl_shutdown();
194 SDL_RemoveTimer(tick_timer_id);
195 sim_kernel_shutdown(); 161 sim_kernel_shutdown();
196 return true; 162 return true;
197} 163}
@@ -287,8 +253,6 @@ int main(int argc, char *argv[])
287 return -1; 253 return -1;
288 } 254 }
289 255
290 tick_timer_id = SDL_AddTimer(10, tick_timer, NULL);
291
292 gui_message_loop(); 256 gui_message_loop();
293 257
294 return gui_shutdown(); 258 return gui_shutdown();