diff options
Diffstat (limited to 'firmware')
-rw-r--r-- | firmware/export/system.h | 10 | ||||
-rw-r--r-- | firmware/kernel/mrsw_lock.c | 4 | ||||
-rw-r--r-- | firmware/kernel/mutex.c | 2 | ||||
-rw-r--r-- | firmware/kernel/queue.c | 10 | ||||
-rw-r--r-- | firmware/kernel/semaphore.c | 2 | ||||
-rw-r--r-- | firmware/kernel/thread.c | 2 | ||||
-rw-r--r-- | firmware/target/arm/system-arm.h | 25 |
7 files changed, 54 insertions, 1 deletion
diff --git a/firmware/export/system.h b/firmware/export/system.h index e83ee53d96..d33a35f6fa 100644 --- a/firmware/export/system.h +++ b/firmware/export/system.h | |||
@@ -204,6 +204,16 @@ enum { | |||
204 | #include "bitswap.h" | 204 | #include "bitswap.h" |
205 | #include "rbendian.h" | 205 | #include "rbendian.h" |
206 | 206 | ||
207 | #ifndef ASSERT_CPU_MODE | ||
208 | /* Very useful to have defined properly for your architecture */ | ||
209 | #define ASSERT_CPU_MODE(mode, rstatus...) \ | ||
210 | ({ (mode); rstatus; }) | ||
211 | #endif | ||
212 | |||
213 | #ifndef CPU_MODE_THREAD_CONTEXT | ||
214 | #define CPU_MODE_THREAD_CONTEXT 0 | ||
215 | #endif | ||
216 | |||
207 | #ifndef BIT_N | 217 | #ifndef BIT_N |
208 | #define BIT_N(n) (1U << (n)) | 218 | #define BIT_N(n) (1U << (n)) |
209 | #endif | 219 | #endif |
diff --git a/firmware/kernel/mrsw_lock.c b/firmware/kernel/mrsw_lock.c index b683f63d5f..6120666d05 100644 --- a/firmware/kernel/mrsw_lock.c +++ b/firmware/kernel/mrsw_lock.c | |||
@@ -159,6 +159,8 @@ void mrsw_init(struct mrsw_lock *mrsw) | |||
159 | * access recursively. The current writer is ignored and gets access. */ | 159 | * access recursively. The current writer is ignored and gets access. */ |
160 | void mrsw_read_acquire(struct mrsw_lock *mrsw) | 160 | void mrsw_read_acquire(struct mrsw_lock *mrsw) |
161 | { | 161 | { |
162 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT); | ||
163 | |||
162 | struct thread_entry *current = __running_self_entry(); | 164 | struct thread_entry *current = __running_self_entry(); |
163 | 165 | ||
164 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) | 166 | if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) |
@@ -268,6 +270,8 @@ void mrsw_read_release(struct mrsw_lock *mrsw) | |||
268 | * safely call recursively. */ | 270 | * safely call recursively. */ |
269 | void mrsw_write_acquire(struct mrsw_lock *mrsw) | 271 | void mrsw_write_acquire(struct mrsw_lock *mrsw) |
270 | { | 272 | { |
273 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT); | ||
274 | |||
271 | struct thread_entry *current = __running_self_entry(); | 275 | struct thread_entry *current = __running_self_entry(); |
272 | 276 | ||
273 | if (current == mrsw->splay.blocker.thread) | 277 | if (current == mrsw->splay.blocker.thread) |
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c index cb9e6816b8..b1ae3e9e54 100644 --- a/firmware/kernel/mutex.c +++ b/firmware/kernel/mutex.c | |||
@@ -39,6 +39,8 @@ void mutex_init(struct mutex *m) | |||
39 | /* Gain ownership of a mutex object or block until it becomes free */ | 39 | /* Gain ownership of a mutex object or block until it becomes free */ |
40 | void mutex_lock(struct mutex *m) | 40 | void mutex_lock(struct mutex *m) |
41 | { | 41 | { |
42 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT); | ||
43 | |||
42 | struct thread_entry *current = __running_self_entry(); | 44 | struct thread_entry *current = __running_self_entry(); |
43 | 45 | ||
44 | if(current == m->blocker.thread) | 46 | if(current == m->blocker.thread) |
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c index 927e55274c..70dba46c0a 100644 --- a/firmware/kernel/queue.c +++ b/firmware/kernel/queue.c | |||
@@ -283,6 +283,9 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) | |||
283 | #endif | 283 | #endif |
284 | 284 | ||
285 | oldlevel = disable_irq_save(); | 285 | oldlevel = disable_irq_save(); |
286 | |||
287 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel); | ||
288 | |||
286 | corelock_lock(&q->cl); | 289 | corelock_lock(&q->cl); |
287 | 290 | ||
288 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 291 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
@@ -335,6 +338,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) | |||
335 | #endif | 338 | #endif |
336 | 339 | ||
337 | oldlevel = disable_irq_save(); | 340 | oldlevel = disable_irq_save(); |
341 | |||
342 | if (ticks != TIMEOUT_NOBLOCK) | ||
343 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel); | ||
344 | |||
338 | corelock_lock(&q->cl); | 345 | corelock_lock(&q->cl); |
339 | 346 | ||
340 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME | 347 | #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME |
@@ -421,6 +428,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) | |||
421 | unsigned int wr; | 428 | unsigned int wr; |
422 | 429 | ||
423 | oldlevel = disable_irq_save(); | 430 | oldlevel = disable_irq_save(); |
431 | |||
432 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel); | ||
433 | |||
424 | corelock_lock(&q->cl); | 434 | corelock_lock(&q->cl); |
425 | 435 | ||
426 | wr = q->write++ & QUEUE_LENGTH_MASK; | 436 | wr = q->write++ & QUEUE_LENGTH_MASK; |
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c index 5e9e46798f..6b58fa3d8a 100644 --- a/firmware/kernel/semaphore.c +++ b/firmware/kernel/semaphore.c | |||
@@ -57,6 +57,8 @@ int semaphore_wait(struct semaphore *s, int timeout) | |||
57 | } | 57 | } |
58 | else if(timeout != 0) | 58 | else if(timeout != 0) |
59 | { | 59 | { |
60 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel); | ||
61 | |||
60 | /* too many waits - block until count is upped... */ | 62 | /* too many waits - block until count is upped... */ |
61 | struct thread_entry *current = __running_self_entry(); | 63 | struct thread_entry *current = __running_self_entry(); |
62 | 64 | ||
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c index ea76421389..29ab9db873 100644 --- a/firmware/kernel/thread.c +++ b/firmware/kernel/thread.c | |||
@@ -1234,6 +1234,8 @@ unsigned int create_thread(void (*function)(void), | |||
1234 | */ | 1234 | */ |
1235 | void thread_wait(unsigned int thread_id) | 1235 | void thread_wait(unsigned int thread_id) |
1236 | { | 1236 | { |
1237 | ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT); | ||
1238 | |||
1237 | struct thread_entry *current = __running_self_entry(); | 1239 | struct thread_entry *current = __running_self_entry(); |
1238 | struct thread_entry *thread = __thread_id_entry(thread_id); | 1240 | struct thread_entry *thread = __thread_id_entry(thread_id); |
1239 | 1241 | ||
diff --git a/firmware/target/arm/system-arm.h b/firmware/target/arm/system-arm.h index 719ec82f1b..2d8c6f2c9f 100644 --- a/firmware/target/arm/system-arm.h +++ b/firmware/target/arm/system-arm.h | |||
@@ -76,9 +76,32 @@ void __div0(void); | |||
76 | #define ints_enabled_checkval(val) \ | 76 | #define ints_enabled_checkval(val) \ |
77 | (((val) & IRQ_FIQ_STATUS) == 0) | 77 | (((val) & IRQ_FIQ_STATUS) == 0) |
78 | 78 | ||
79 | #define CPU_MODE_USER 0x10 | ||
80 | #define CPU_MODE_FIQ 0x11 | ||
81 | #define CPU_MODE_IRQ 0x12 | ||
82 | #define CPU_MODE_SVC 0x13 | ||
83 | #define CPU_MODE_ABT 0x17 | ||
84 | #define CPU_MODE_UNDEF 0x1b | ||
85 | #define CPU_MODE_SYS 0x1f | ||
86 | |||
79 | /* We run in SYS mode */ | 87 | /* We run in SYS mode */ |
88 | #define CPU_MODE_THREAD_CONTEXT CPU_MODE_SYS | ||
89 | |||
80 | #define is_thread_context() \ | 90 | #define is_thread_context() \ |
81 | (get_processor_mode() == 0x1f) | 91 | (get_processor_mode() == CPU_MODE_THREAD_CONTEXT) |
92 | |||
93 | /* Assert that the processor is in the desired execution mode | ||
94 | * mode: Processor mode value to test for | ||
95 | * rstatus...: Provide if you already have the value saved, otherwise leave | ||
96 | * blank to get it automatically. | ||
97 | */ | ||
98 | #define ASSERT_CPU_MODE(mode, rstatus...) \ | ||
99 | ({ unsigned long __massert = (mode); \ | ||
100 | unsigned long __mproc = *#rstatus ? \ | ||
101 | ((rstatus +0) & 0x1f) : get_processor_mode(); \ | ||
102 | if (__mproc != __massert) \ | ||
103 | panicf("Incorrect CPU mode in %s (0x%02lx!=0x%02lx)", \ | ||
104 | __func__, __mproc, __massert); }) | ||
82 | 105 | ||
83 | /* Core-level interrupt masking */ | 106 | /* Core-level interrupt masking */ |
84 | 107 | ||