diff options
author | Michael Sevakis <jethead71@rockbox.org> | 2012-01-08 22:29:25 +0000 |
---|---|---|
committer | Michael Sevakis <jethead71@rockbox.org> | 2012-01-08 22:29:25 +0000 |
commit | 307cb049485cc20140b85aa78f8e2677e8df5851 (patch) | |
tree | eaadfd7266e4e97e69bccd68655140dd9f1ef061 /firmware/kernel.c | |
parent | 5e21bbf5757163725f4bd2909fc7aaa548f61fc3 (diff) | |
download | rockbox-307cb049485cc20140b85aa78f8e2677e8df5851.tar.gz rockbox-307cb049485cc20140b85aa78f8e2677e8df5851.zip |
AS3525v1/2: Enable nested handling of interrupts
Mostly for the sake of reducing latency for audio servicing where other service
routines can take a long time to complete, leading to occasional drops of a
few samples, especially in recording, where they are fairly frequent.
One mystery that remains is that having the GPIOA IRQ interrupted causes strange
undefined instruction exceptions, most easily reproduced on my Fuze V2 with a
scrollwheel. Making GPIOA the top ISR for now, thus not interruptible, cures it.
SVC mode is used during the actual calls. Hopefully the SVC stack size is
sufficient. Prologue and epilogue code uses only the IRQ stack, which is large
enough.
Any routine code that should not be interrupted should disable IRQ itself from
here on in.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@31642 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'firmware/kernel.c')
-rw-r--r-- | firmware/kernel.c | 14 |
1 files changed, 4 insertions, 10 deletions
diff --git a/firmware/kernel.c b/firmware/kernel.c index 155205749f..0b39e29126 100644 --- a/firmware/kernel.c +++ b/firmware/kernel.c | |||
@@ -1195,9 +1195,7 @@ int semaphore_wait(struct semaphore *s, int timeout) | |||
1195 | * in 'semaphore_init'. */ | 1195 | * in 'semaphore_init'. */ |
1196 | void semaphore_release(struct semaphore *s) | 1196 | void semaphore_release(struct semaphore *s) |
1197 | { | 1197 | { |
1198 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval) | ||
1199 | unsigned int result = THREAD_NONE; | 1198 | unsigned int result = THREAD_NONE; |
1200 | #endif | ||
1201 | int oldlevel; | 1199 | int oldlevel; |
1202 | 1200 | ||
1203 | oldlevel = disable_irq_save(); | 1201 | oldlevel = disable_irq_save(); |
@@ -1209,11 +1207,7 @@ void semaphore_release(struct semaphore *s) | |||
1209 | KERNEL_ASSERT(s->count == 0, | 1207 | KERNEL_ASSERT(s->count == 0, |
1210 | "semaphore_release->threads queued but count=%d!\n", s->count); | 1208 | "semaphore_release->threads queued but count=%d!\n", s->count); |
1211 | s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ | 1209 | s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ |
1212 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval) | ||
1213 | result = wakeup_thread(&s->queue); | 1210 | result = wakeup_thread(&s->queue); |
1214 | #else | ||
1215 | wakeup_thread(&s->queue); | ||
1216 | #endif | ||
1217 | } | 1211 | } |
1218 | else | 1212 | else |
1219 | { | 1213 | { |
@@ -1228,11 +1222,11 @@ void semaphore_release(struct semaphore *s) | |||
1228 | corelock_unlock(&s->cl); | 1222 | corelock_unlock(&s->cl); |
1229 | restore_irq(oldlevel); | 1223 | restore_irq(oldlevel); |
1230 | 1224 | ||
1231 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(irq_enabled_checkval) | 1225 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context) |
1232 | /* No thread switch if IRQ disabled - it's probably called via ISR. | 1226 | /* No thread switch if not thread context */ |
1233 | * switch_thread would as well enable them anyway. */ | 1227 | if((result & THREAD_SWITCH) && is_thread_context()) |
1234 | if((result & THREAD_SWITCH) && irq_enabled_checkval(oldlevel)) | ||
1235 | switch_thread(); | 1228 | switch_thread(); |
1236 | #endif | 1229 | #endif |
1230 | (void)result; | ||
1237 | } | 1231 | } |
1238 | #endif /* HAVE_SEMAPHORE_OBJECTS */ | 1232 | #endif /* HAVE_SEMAPHORE_OBJECTS */ |