diff options
Diffstat (limited to 'firmware')
29 files changed, 1563 insertions, 755 deletions
diff --git a/firmware/SOURCES b/firmware/SOURCES index ac9fe132a7..374210e30b 100644 --- a/firmware/SOURCES +++ b/firmware/SOURCES | |||
@@ -29,7 +29,6 @@ usb.c | |||
29 | #if defined(ROCKBOX_HAS_LOGF) || defined(ROCKBOX_HAS_LOGDISKF) | 29 | #if defined(ROCKBOX_HAS_LOGF) || defined(ROCKBOX_HAS_LOGDISKF) |
30 | logf.c | 30 | logf.c |
31 | #endif /* ROCKBOX_HAS_LOGF */ | 31 | #endif /* ROCKBOX_HAS_LOGF */ |
32 | kernel.c | ||
33 | #if (CONFIG_PLATFORM & PLATFORM_NATIVE) | 32 | #if (CONFIG_PLATFORM & PLATFORM_NATIVE) |
34 | load_code.c | 33 | load_code.c |
35 | #ifdef RB_PROFILE | 34 | #ifdef RB_PROFILE |
@@ -41,7 +40,6 @@ common/rb-loader.c | |||
41 | #if !defined(BOOTLOADER) || defined(CPU_SH) | 40 | #if !defined(BOOTLOADER) || defined(CPU_SH) |
42 | rolo.c | 41 | rolo.c |
43 | #endif /* !defined(BOOTLOADER) || defined(CPU_SH) */ | 42 | #endif /* !defined(BOOTLOADER) || defined(CPU_SH) */ |
44 | thread.c | ||
45 | timer.c | 43 | timer.c |
46 | debug.c | 44 | debug.c |
47 | #endif /* PLATFORM_NATIVE */ | 45 | #endif /* PLATFORM_NATIVE */ |
@@ -63,7 +61,6 @@ target/hosted/sdl/system-sdl.c | |||
63 | #ifdef HAVE_SDL_THREADS | 61 | #ifdef HAVE_SDL_THREADS |
64 | target/hosted/sdl/thread-sdl.c | 62 | target/hosted/sdl/thread-sdl.c |
65 | #else | 63 | #else |
66 | thread.c | ||
67 | #endif | 64 | #endif |
68 | target/hosted/sdl/timer-sdl.c | 65 | target/hosted/sdl/timer-sdl.c |
69 | #ifdef HAVE_TOUCHSCREEN | 66 | #ifdef HAVE_TOUCHSCREEN |
@@ -78,7 +75,6 @@ target/hosted/sdl/app/button-application.c | |||
78 | target/hosted/kernel-unix.c | 75 | target/hosted/kernel-unix.c |
79 | target/hosted/filesystem-unix.c | 76 | target/hosted/filesystem-unix.c |
80 | target/hosted/lc-unix.c | 77 | target/hosted/lc-unix.c |
81 | thread.c | ||
82 | drivers/lcd-memframe.c | 78 | drivers/lcd-memframe.c |
83 | target/hosted/samsungypr/lcd-ypr.c | 79 | target/hosted/samsungypr/lcd-ypr.c |
84 | target/hosted/samsungypr/gpio-ypr.c | 80 | target/hosted/samsungypr/gpio-ypr.c |
@@ -1813,7 +1809,6 @@ target/hosted/android/telephony-android.c | |||
1813 | target/hosted/android/app/button-application.c | 1809 | target/hosted/android/app/button-application.c |
1814 | #endif | 1810 | #endif |
1815 | drivers/audio/android.c | 1811 | drivers/audio/android.c |
1816 | thread.c | ||
1817 | #endif | 1812 | #endif |
1818 | 1813 | ||
1819 | #endif /* defined(SIMULATOR) */ | 1814 | #endif /* defined(SIMULATOR) */ |
@@ -1821,3 +1816,22 @@ thread.c | |||
1821 | #if defined(HAVE_TOUCHPAD) && !defined(HAS_BUTTON_HOLD) | 1816 | #if defined(HAVE_TOUCHPAD) && !defined(HAS_BUTTON_HOLD) |
1822 | drivers/touchpad.c | 1817 | drivers/touchpad.c |
1823 | #endif | 1818 | #endif |
1819 | |||
1820 | /* firmware/kernel section */ | ||
1821 | #ifdef HAVE_CORELOCK_OBJECT | ||
1822 | kernel/corelock.c | ||
1823 | #endif | ||
1824 | kernel/mutex.c | ||
1825 | kernel/queue.c | ||
1826 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
1827 | kernel/semaphore.c | ||
1828 | #endif | ||
1829 | #if defined(HAVE_SDL_THREADS) | ||
1830 | target/hosted/sdl/thread-sdl.c | ||
1831 | #else | ||
1832 | kernel/thread.c | ||
1833 | #endif | ||
1834 | kernel/tick.c | ||
1835 | #ifdef INCLUDE_TIMEOUT_API | ||
1836 | kernel/timeout.c | ||
1837 | #endif | ||
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c new file mode 100644 index 0000000000..713164e49b --- /dev/null +++ b/firmware/asm/arm/corelock.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2007 by Daniel Ankers | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | /* Core locks using Peterson's mutual exclusion algorithm. | ||
23 | * ASM optimized version of C code, see firmware/asm/corelock.c */ | ||
24 | |||
25 | #include "cpu.h" | ||
26 | |||
27 | /*--------------------------------------------------------------------------- | ||
28 | * Wait for the corelock to become free and acquire it when it does. | ||
29 | *--------------------------------------------------------------------------- | ||
30 | */ | ||
31 | void __attribute__((naked)) corelock_lock(struct corelock *cl) | ||
32 | { | ||
33 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ | ||
34 | asm volatile ( | ||
35 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
36 | "ldrb r1, [r1] \n" | ||
37 | "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */ | ||
38 | "eor r2, r1, #0xff \n" /* r2 = othercore */ | ||
39 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
40 | "1: \n" | ||
41 | "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */ | ||
42 | "cmp r3, #0 \n" /* yes? lock acquired */ | ||
43 | "bxeq lr \n" | ||
44 | "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */ | ||
45 | "cmp r3, r1 \n" | ||
46 | "bxeq lr \n" /* yes? lock acquired */ | ||
47 | "b 1b \n" /* keep trying */ | ||
48 | : : "i"(&PROCESSOR_ID) | ||
49 | ); | ||
50 | (void)cl; | ||
51 | } | ||
52 | |||
53 | /*--------------------------------------------------------------------------- | ||
54 | * Try to aquire the corelock. If free, caller gets it, otherwise return 0. | ||
55 | *--------------------------------------------------------------------------- | ||
56 | */ | ||
57 | int __attribute__((naked)) corelock_try_lock(struct corelock *cl) | ||
58 | { | ||
59 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ | ||
60 | asm volatile ( | ||
61 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
62 | "ldrb r1, [r1] \n" | ||
63 | "mov r3, r0 \n" | ||
64 | "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */ | ||
65 | "eor r2, r1, #0xff \n" /* r2 = othercore */ | ||
66 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
67 | "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */ | ||
68 | "eors r0, r0, r2 \n" /* yes? lock acquired */ | ||
69 | "bxne lr \n" | ||
70 | "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */ | ||
71 | "ands r0, r0, r1 \n" | ||
72 | "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */ | ||
73 | "bx lr \n" /* return result */ | ||
74 | : : "i"(&PROCESSOR_ID) | ||
75 | ); | ||
76 | |||
77 | return 0; | ||
78 | (void)cl; | ||
79 | } | ||
80 | |||
81 | /*--------------------------------------------------------------------------- | ||
82 | * Release ownership of the corelock | ||
83 | *--------------------------------------------------------------------------- | ||
84 | */ | ||
85 | void __attribute__((naked)) corelock_unlock(struct corelock *cl) | ||
86 | { | ||
87 | asm volatile ( | ||
88 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
89 | "ldrb r1, [r1] \n" | ||
90 | "mov r2, #0 \n" /* cl->myl[core] = 0 */ | ||
91 | "strb r2, [r0, r1, lsr #7] \n" | ||
92 | "bx lr \n" | ||
93 | : : "i"(&PROCESSOR_ID) | ||
94 | ); | ||
95 | (void)cl; | ||
96 | } | ||
diff --git a/firmware/asm/corelock.c b/firmware/asm/corelock.c new file mode 100644 index 0000000000..51d1d71961 --- /dev/null +++ b/firmware/asm/corelock.c | |||
@@ -0,0 +1,67 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2007 by Daniel Ankers | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #include "config.h" | ||
23 | #include "corelock.h" | ||
24 | /* Core locks using Peterson's mutual exclusion algorithm. */ | ||
25 | |||
26 | #ifdef CPU_ARM | ||
27 | #include "arm/corelock.c" | ||
28 | #else | ||
29 | |||
30 | void corelock_lock(struct corelock *cl) | ||
31 | { | ||
32 | const unsigned int core = CURRENT_CORE; | ||
33 | const unsigned int othercore = 1 - core; | ||
34 | |||
35 | cl->myl[core] = core; | ||
36 | cl->turn = othercore; | ||
37 | |||
38 | for (;;) | ||
39 | { | ||
40 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
41 | break; | ||
42 | } | ||
43 | } | ||
44 | |||
45 | int corelock_try_lock(struct corelock *cl) | ||
46 | { | ||
47 | const unsigned int core = CURRENT_CORE; | ||
48 | const unsigned int othercore = 1 - core; | ||
49 | |||
50 | cl->myl[core] = core; | ||
51 | cl->turn = othercore; | ||
52 | |||
53 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
54 | { | ||
55 | return 1; | ||
56 | } | ||
57 | |||
58 | cl->myl[core] = 0; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | void corelock_unlock(struct corelock *cl) | ||
63 | { | ||
64 | cl->myl[CURRENT_CORE] = 0; | ||
65 | } | ||
66 | |||
67 | #endif | ||
diff --git a/firmware/drivers/audio/as3514.c b/firmware/drivers/audio/as3514.c index 6f531be095..d8572df4d6 100644 --- a/firmware/drivers/audio/as3514.c +++ b/firmware/drivers/audio/as3514.c | |||
@@ -23,6 +23,7 @@ | |||
23 | ****************************************************************************/ | 23 | ****************************************************************************/ |
24 | 24 | ||
25 | #include "cpu.h" | 25 | #include "cpu.h" |
26 | #include "kernel.h" | ||
26 | #include "debug.h" | 27 | #include "debug.h" |
27 | #include "system.h" | 28 | #include "system.h" |
28 | #include "kernel.h" | 29 | #include "kernel.h" |
diff --git a/firmware/export/system.h b/firmware/export/system.h index 25f9287618..1dab352071 100644 --- a/firmware/export/system.h +++ b/firmware/export/system.h | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <stdbool.h> | 25 | #include <stdbool.h> |
26 | #include <stdint.h> | 26 | #include <stdint.h> |
27 | |||
28 | #include "cpu.h" | 27 | #include "cpu.h" |
29 | #include "gcc_extensions.h" /* for LIKELY/UNLIKELY */ | 28 | #include "gcc_extensions.h" /* for LIKELY/UNLIKELY */ |
30 | 29 | ||
@@ -86,6 +85,10 @@ int get_cpu_boost_counter(void); | |||
86 | 85 | ||
87 | #define BAUDRATE 9600 | 86 | #define BAUDRATE 9600 |
88 | 87 | ||
88 | /* wrap-safe macros for tick comparison */ | ||
89 | #define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0) | ||
90 | #define TIME_BEFORE(a,b) TIME_AFTER(b,a) | ||
91 | |||
89 | #ifndef NULL | 92 | #ifndef NULL |
90 | #define NULL ((void*)0) | 93 | #define NULL ((void*)0) |
91 | #endif | 94 | #endif |
diff --git a/firmware/firmware.make b/firmware/firmware.make index 570087bf45..8e2d475683 100644 --- a/firmware/firmware.make +++ b/firmware/firmware.make | |||
@@ -7,7 +7,8 @@ | |||
7 | # $Id$ | 7 | # $Id$ |
8 | # | 8 | # |
9 | 9 | ||
10 | INCLUDES += -I$(FIRMDIR) -I$(FIRMDIR)/export -I$(FIRMDIR)/drivers -I$(FIRMDIR)/include | 10 | INCLUDES += -I$(FIRMDIR) -I$(FIRMDIR)/export -I$(FIRMDIR)/drivers \ |
11 | -I$(FIRMDIR)/include -I$(FIRMDIR)/kernel/include | ||
11 | ifndef APP_TYPE | 12 | ifndef APP_TYPE |
12 | INCLUDES += -I$(FIRMDIR)/libc/include | 13 | INCLUDES += -I$(FIRMDIR)/libc/include |
13 | endif | 14 | endif |
diff --git a/firmware/general.c b/firmware/general.c index d6c1d2f465..c70d21c4a0 100644 --- a/firmware/general.c +++ b/firmware/general.c | |||
@@ -20,8 +20,6 @@ | |||
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | 21 | ||
22 | #include <stdio.h> | 22 | #include <stdio.h> |
23 | #include "config.h" | ||
24 | #include "system.h" | ||
25 | #include "kernel.h" | 23 | #include "kernel.h" |
26 | #include "general.h" | 24 | #include "general.h" |
27 | #include "file.h" | 25 | #include "file.h" |
diff --git a/firmware/kernel/corelock.c b/firmware/kernel/corelock.c new file mode 100644 index 0000000000..53d08a9069 --- /dev/null +++ b/firmware/kernel/corelock.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2007 by Daniel Ankers | ||
11 | * | ||
12 | * PP5002 and PP502x SoC threading support | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version 2 | ||
17 | * of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
20 | * KIND, either express or implied. | ||
21 | * | ||
22 | ****************************************************************************/ | ||
23 | |||
24 | #include <string.h> | ||
25 | #include "corelock.h" | ||
26 | |||
27 | /* Core locks using Peterson's mutual exclusion algorithm */ | ||
28 | |||
29 | |||
30 | /*--------------------------------------------------------------------------- | ||
31 | * Initialize the corelock structure. | ||
32 | *--------------------------------------------------------------------------- | ||
33 | */ | ||
34 | void corelock_init(struct corelock *cl) | ||
35 | { | ||
36 | memset(cl, 0, sizeof (*cl)); | ||
37 | } | ||
38 | |||
39 | /* other corelock methods are ASM-optimized */ | ||
40 | #include "asm/corelock.c" | ||
diff --git a/firmware/kernel/include/corelock.h b/firmware/kernel/include/corelock.h new file mode 100644 index 0000000000..79302e0e3c --- /dev/null +++ b/firmware/kernel/include/corelock.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Ulf Ralberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | |||
23 | #ifndef CORELOCK_H | ||
24 | #define CORELOCK_H | ||
25 | |||
26 | #include "config.h" | ||
27 | |||
28 | #ifndef HAVE_CORELOCK_OBJECT | ||
29 | |||
30 | /* No atomic corelock op needed or just none defined */ | ||
31 | #define corelock_init(cl) | ||
32 | #define corelock_lock(cl) | ||
33 | #define corelock_try_lock(cl) | ||
34 | #define corelock_unlock(cl) | ||
35 | |||
36 | #else | ||
37 | |||
38 | /* No reliable atomic instruction available - use Peterson's algorithm */ | ||
39 | struct corelock | ||
40 | { | ||
41 | volatile unsigned char myl[NUM_CORES]; | ||
42 | volatile unsigned char turn; | ||
43 | } __attribute__((packed)); | ||
44 | |||
45 | /* Too big to inline everywhere */ | ||
46 | extern void corelock_init(struct corelock *cl); | ||
47 | extern void corelock_lock(struct corelock *cl); | ||
48 | extern int corelock_try_lock(struct corelock *cl); | ||
49 | extern void corelock_unlock(struct corelock *cl); | ||
50 | |||
51 | #endif /* HAVE_CORELOCK_OBJECT */ | ||
52 | |||
53 | #endif /* CORELOCK_H */ | ||
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h new file mode 100644 index 0000000000..fafff25ce4 --- /dev/null +++ b/firmware/kernel/include/kernel.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | #ifndef KERNEL_H | ||
22 | #define KERNEL_H | ||
23 | |||
24 | #include "config.h" | ||
25 | |||
26 | #include "system.h" | ||
27 | #include "queue.h" | ||
28 | #include "mutex.h" | ||
29 | #include "tick.h" | ||
30 | |||
31 | #ifdef INCLUDE_TIMEOUT_API | ||
32 | #include "timeout.h" | ||
33 | #endif | ||
34 | |||
35 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
36 | #include "semaphore.h" | ||
37 | #endif | ||
38 | |||
39 | #ifdef HAVE_CORELOCK_OBJECT | ||
40 | #include "corelock.h" | ||
41 | #endif | ||
42 | |||
43 | #define OBJ_WAIT_TIMEDOUT (-1) | ||
44 | #define OBJ_WAIT_FAILED 0 | ||
45 | #define OBJ_WAIT_SUCCEEDED 1 | ||
46 | |||
47 | #define TIMEOUT_BLOCK -1 | ||
48 | #define TIMEOUT_NOBLOCK 0 | ||
49 | |||
50 | static inline void kernel_init(void) | ||
51 | { | ||
52 | /* Init the threading API */ | ||
53 | init_threads(); | ||
54 | |||
55 | /* Other processors will not reach this point in a multicore build. | ||
56 | * In a single-core build with multiple cores they fall-through and | ||
57 | * sleep in cop_main without returning. */ | ||
58 | if (CURRENT_CORE == CPU) | ||
59 | { | ||
60 | init_queues(); | ||
61 | init_tick(); | ||
62 | #ifdef KDEV_INIT | ||
63 | kernel_device_init(); | ||
64 | #endif | ||
65 | } | ||
66 | } | ||
67 | |||
68 | |||
69 | #endif /* KERNEL_H */ | ||
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h new file mode 100644 index 0000000000..bcf5701bd9 --- /dev/null +++ b/firmware/kernel/include/mutex.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #ifndef MUTEX_H | ||
23 | #define MUTEX_H | ||
24 | |||
25 | #include <stdbool.h> | ||
26 | #include "config.h" | ||
27 | #include "thread.h" | ||
28 | |||
29 | struct mutex | ||
30 | { | ||
31 | struct thread_entry *queue; /* waiter list */ | ||
32 | int recursion; /* lock owner recursion count */ | ||
33 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
34 | struct blocker blocker; /* priority inheritance info | ||
35 | for waiters */ | ||
36 | bool no_preempt; /* don't allow higher-priority thread | ||
37 | to be scheduled even if woken */ | ||
38 | #else | ||
39 | struct thread_entry *thread; /* Indicates owner thread - an owner | ||
40 | implies a locked state - same goes | ||
41 | for priority scheduling | ||
42 | (in blocker struct for that) */ | ||
43 | #endif | ||
44 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | ||
45 | }; | ||
46 | |||
47 | extern void mutex_init(struct mutex *m); | ||
48 | extern void mutex_lock(struct mutex *m); | ||
49 | extern void mutex_unlock(struct mutex *m); | ||
50 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
51 | /* Deprecated temporary function to disable mutex preempting a thread on | ||
52 | * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c - | ||
53 | * reliance on it is a bug! */ | ||
54 | static inline void mutex_set_preempt(struct mutex *m, bool preempt) | ||
55 | { m->no_preempt = !preempt; } | ||
56 | #else | ||
57 | /* Deprecated but needed for now - firmware/drivers/ata_mmc.c */ | ||
58 | static inline bool mutex_test(const struct mutex *m) | ||
59 | { return m->thread != NULL; } | ||
60 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
61 | |||
62 | #endif /* MUTEX_H */ | ||
diff --git a/firmware/export/kernel.h b/firmware/kernel/include/queue.h index 3cadefdf68..1b404f8297 100644 --- a/firmware/export/kernel.h +++ b/firmware/kernel/include/queue.h | |||
@@ -18,27 +18,14 @@ | |||
18 | * KIND, either express or implied. | 18 | * KIND, either express or implied. |
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | #ifndef _KERNEL_H_ | ||
22 | #define _KERNEL_H_ | ||
23 | 21 | ||
24 | #include <stdbool.h> | 22 | #ifndef QUEUE_H |
25 | #include <inttypes.h> | 23 | #define QUEUE_H |
26 | #include "config.h" | ||
27 | 24 | ||
25 | #include <stdint.h> | ||
26 | #include "config.h" | ||
28 | #include "thread.h" | 27 | #include "thread.h" |
29 | 28 | ||
30 | /* wrap-safe macros for tick comparison */ | ||
31 | #define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0) | ||
32 | #define TIME_BEFORE(a,b) TIME_AFTER(b,a) | ||
33 | |||
34 | #define HZ 100 /* number of ticks per second */ | ||
35 | |||
36 | #define MAX_NUM_TICK_TASKS 8 | ||
37 | |||
38 | #define MAX_NUM_QUEUES 32 | ||
39 | #define QUEUE_LENGTH 16 /* MUST be a power of 2 */ | ||
40 | #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1) | ||
41 | |||
42 | /* System defined message ID's - |sign bit = 1|class|id| */ | 29 | /* System defined message ID's - |sign bit = 1|class|id| */ |
43 | /* Event class list */ | 30 | /* Event class list */ |
44 | #define SYS_EVENT_CLS_QUEUE 0 | 31 | #define SYS_EVENT_CLS_QUEUE 0 |
@@ -85,10 +72,9 @@ | |||
85 | 72 | ||
86 | #define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT) | 73 | #define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT) |
87 | 74 | ||
88 | #ifndef TIMEOUT_BLOCK | 75 | #define MAX_NUM_QUEUES 32 |
89 | #define TIMEOUT_BLOCK -1 | 76 | #define QUEUE_LENGTH 16 /* MUST be a power of 2 */ |
90 | #define TIMEOUT_NOBLOCK 0 | 77 | #define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1) |
91 | #endif | ||
92 | 78 | ||
93 | struct queue_event | 79 | struct queue_event |
94 | { | 80 | { |
@@ -137,100 +123,6 @@ struct event_queue | |||
137 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | 123 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ |
138 | }; | 124 | }; |
139 | 125 | ||
140 | struct mutex | ||
141 | { | ||
142 | struct thread_entry *queue; /* waiter list */ | ||
143 | int recursion; /* lock owner recursion count */ | ||
144 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
145 | struct blocker blocker; /* priority inheritance info | ||
146 | for waiters */ | ||
147 | bool no_preempt; /* don't allow higher-priority thread | ||
148 | to be scheduled even if woken */ | ||
149 | #else | ||
150 | struct thread_entry *thread; /* Indicates owner thread - an owner | ||
151 | implies a locked state - same goes | ||
152 | for priority scheduling | ||
153 | (in blocker struct for that) */ | ||
154 | #endif | ||
155 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | ||
156 | }; | ||
157 | |||
158 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
159 | struct semaphore | ||
160 | { | ||
161 | struct thread_entry *queue; /* Waiter list */ | ||
162 | int volatile count; /* # of waits remaining before unsignaled */ | ||
163 | int max; /* maximum # of waits to remain signaled */ | ||
164 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | ||
165 | }; | ||
166 | #endif | ||
167 | |||
168 | /* global tick variable */ | ||
169 | #if defined(CPU_PP) && defined(BOOTLOADER) && \ | ||
170 | !defined(HAVE_BOOTLOADER_USB_MODE) | ||
171 | /* We don't enable interrupts in the PP bootloader unless USB mode is | ||
172 | enabled for it, so we need to fake the current_tick variable */ | ||
173 | #define current_tick (signed)(USEC_TIMER/10000) | ||
174 | |||
175 | static inline void call_tick_tasks(void) | ||
176 | { | ||
177 | } | ||
178 | #else | ||
179 | extern volatile long current_tick; | ||
180 | |||
181 | /* inline helper for implementing target interrupt handler */ | ||
182 | static inline void call_tick_tasks(void) | ||
183 | { | ||
184 | extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void); | ||
185 | void (**p)(void) = tick_funcs; | ||
186 | void (*fn)(void); | ||
187 | |||
188 | current_tick++; | ||
189 | |||
190 | for(fn = *p; fn != NULL; fn = *(++p)) | ||
191 | { | ||
192 | fn(); | ||
193 | } | ||
194 | } | ||
195 | #endif | ||
196 | |||
197 | /* kernel functions */ | ||
198 | extern void kernel_init(void) INIT_ATTR; | ||
199 | extern void yield(void); | ||
200 | extern unsigned sleep(unsigned ticks); | ||
201 | int tick_add_task(void (*f)(void)); | ||
202 | int tick_remove_task(void (*f)(void)); | ||
203 | extern void tick_start(unsigned int interval_in_ms) INIT_ATTR; | ||
204 | |||
205 | #ifdef INCLUDE_TIMEOUT_API | ||
206 | struct timeout; | ||
207 | |||
208 | /* timeout callback type | ||
209 | * tmo - pointer to struct timeout associated with event | ||
210 | * return next interval or <= 0 to stop event | ||
211 | */ | ||
212 | #define MAX_NUM_TIMEOUTS 8 | ||
213 | typedef int (* timeout_cb_type)(struct timeout *tmo); | ||
214 | |||
215 | struct timeout | ||
216 | { | ||
217 | timeout_cb_type callback;/* callback - returning false cancels */ | ||
218 | intptr_t data; /* data passed to callback */ | ||
219 | long expires; /* expiration tick */ | ||
220 | }; | ||
221 | |||
222 | void timeout_register(struct timeout *tmo, timeout_cb_type callback, | ||
223 | int ticks, intptr_t data); | ||
224 | void timeout_cancel(struct timeout *tmo); | ||
225 | #endif /* INCLUDE_TIMEOUT_API */ | ||
226 | |||
227 | #define STATE_NONSIGNALED 0 | ||
228 | #define STATE_SIGNALED 1 | ||
229 | |||
230 | #define OBJ_WAIT_TIMEDOUT (-1) | ||
231 | #define OBJ_WAIT_FAILED 0 | ||
232 | #define OBJ_WAIT_SUCCEEDED 1 | ||
233 | |||
234 | extern void queue_init(struct event_queue *q, bool register_queue); | 126 | extern void queue_init(struct event_queue *q, bool register_queue); |
235 | extern void queue_delete(struct event_queue *q); | 127 | extern void queue_delete(struct event_queue *q); |
236 | extern void queue_wait(struct event_queue *q, struct queue_event *ev); | 128 | extern void queue_wait(struct event_queue *q, struct queue_event *ev); |
@@ -260,26 +152,6 @@ extern void queue_clear(struct event_queue* q); | |||
260 | extern void queue_remove_from_head(struct event_queue *q, long id); | 152 | extern void queue_remove_from_head(struct event_queue *q, long id); |
261 | extern int queue_count(const struct event_queue *q); | 153 | extern int queue_count(const struct event_queue *q); |
262 | extern int queue_broadcast(long id, intptr_t data); | 154 | extern int queue_broadcast(long id, intptr_t data); |
155 | extern void init_queues(void); | ||
263 | 156 | ||
264 | extern void mutex_init(struct mutex *m); | 157 | #endif /* QUEUE_H */ |
265 | extern void mutex_lock(struct mutex *m); | ||
266 | extern void mutex_unlock(struct mutex *m); | ||
267 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
268 | /* Deprecated temporary function to disable mutex preempting a thread on | ||
269 | * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c - | ||
270 | * reliance on it is a bug! */ | ||
271 | static inline void mutex_set_preempt(struct mutex *m, bool preempt) | ||
272 | { m->no_preempt = !preempt; } | ||
273 | #else | ||
274 | /* Deprecated but needed for now - firmware/drivers/ata_mmc.c */ | ||
275 | static inline bool mutex_test(const struct mutex *m) | ||
276 | { return m->thread != NULL; } | ||
277 | #endif /* HAVE_PRIORITY_SCHEDULING */ | ||
278 | |||
279 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
280 | extern void semaphore_init(struct semaphore *s, int max, int start); | ||
281 | extern int semaphore_wait(struct semaphore *s, int timeout); | ||
282 | extern void semaphore_release(struct semaphore *s); | ||
283 | #endif /* HAVE_SEMAPHORE_OBJECTS */ | ||
284 | |||
285 | #endif /* _KERNEL_H_ */ | ||
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h new file mode 100644 index 0000000000..40e60bb88d --- /dev/null +++ b/firmware/kernel/include/semaphore.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #ifndef SEMAPHORE_H | ||
23 | #define SEMAPHORE_H | ||
24 | |||
25 | #include "config.h" | ||
26 | #include "thread.h" | ||
27 | |||
28 | struct semaphore | ||
29 | { | ||
30 | struct thread_entry *queue; /* Waiter list */ | ||
31 | int volatile count; /* # of waits remaining before unsignaled */ | ||
32 | int max; /* maximum # of waits to remain signaled */ | ||
33 | IF_COP( struct corelock cl; ) /* multiprocessor sync */ | ||
34 | }; | ||
35 | |||
36 | extern void semaphore_init(struct semaphore *s, int max, int start); | ||
37 | extern int semaphore_wait(struct semaphore *s, int timeout); | ||
38 | extern void semaphore_release(struct semaphore *s); | ||
39 | |||
40 | #endif /* SEMAPHORE_H */ | ||
diff --git a/firmware/export/thread.h b/firmware/kernel/include/thread.h index da395b8ffa..9cc33b23ae 100644 --- a/firmware/export/thread.h +++ b/firmware/kernel/include/thread.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <stddef.h> | 27 | #include <stddef.h> |
28 | #include <stdbool.h> | 28 | #include <stdbool.h> |
29 | #include "gcc_extensions.h" | 29 | #include "gcc_extensions.h" |
30 | #include "corelock.h" | ||
30 | 31 | ||
31 | /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works | 32 | /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works |
32 | * by giving high priority threads more CPU time than lower priority threads | 33 | * by giving high priority threads more CPU time than lower priority threads |
@@ -63,6 +64,7 @@ | |||
63 | #define IO_PRIORITY_IMMEDIATE 0 | 64 | #define IO_PRIORITY_IMMEDIATE 0 |
64 | #define IO_PRIORITY_BACKGROUND 32 | 65 | #define IO_PRIORITY_BACKGROUND 32 |
65 | 66 | ||
67 | |||
66 | #if CONFIG_CODEC == SWCODEC | 68 | #if CONFIG_CODEC == SWCODEC |
67 | # ifdef HAVE_HARDWARE_CLICK | 69 | # ifdef HAVE_HARDWARE_CLICK |
68 | # define BASETHREADS 17 | 70 | # define BASETHREADS 17 |
@@ -78,7 +80,6 @@ | |||
78 | #endif | 80 | #endif |
79 | 81 | ||
80 | #define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS) | 82 | #define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS) |
81 | |||
82 | /* | 83 | /* |
83 | * We need more stack when we run under a host | 84 | * We need more stack when we run under a host |
84 | * maybe more expensive C lib functions? | 85 | * maybe more expensive C lib functions? |
@@ -99,23 +100,6 @@ struct regs | |||
99 | #include "asm/thread.h" | 100 | #include "asm/thread.h" |
100 | #endif /* HAVE_SDL_THREADS */ | 101 | #endif /* HAVE_SDL_THREADS */ |
101 | 102 | ||
102 | #ifdef CPU_PP | ||
103 | #ifdef HAVE_CORELOCK_OBJECT | ||
104 | /* No reliable atomic instruction available - use Peterson's algorithm */ | ||
105 | struct corelock | ||
106 | { | ||
107 | volatile unsigned char myl[NUM_CORES]; | ||
108 | volatile unsigned char turn; | ||
109 | } __attribute__((packed)); | ||
110 | |||
111 | /* Too big to inline everywhere */ | ||
112 | void corelock_init(struct corelock *cl); | ||
113 | void corelock_lock(struct corelock *cl); | ||
114 | int corelock_try_lock(struct corelock *cl); | ||
115 | void corelock_unlock(struct corelock *cl); | ||
116 | #endif /* HAVE_CORELOCK_OBJECT */ | ||
117 | #endif /* CPU_PP */ | ||
118 | |||
119 | /* NOTE: The use of the word "queue" may also refer to a linked list of | 103 | /* NOTE: The use of the word "queue" may also refer to a linked list of |
120 | threads being maintained that are normally dealt with in FIFO order | 104 | threads being maintained that are normally dealt with in FIFO order |
121 | and not necessarily kernel event_queue */ | 105 | and not necessarily kernel event_queue */ |
@@ -150,14 +134,6 @@ struct thread_list | |||
150 | struct thread_entry *next; /* Next thread in a list */ | 134 | struct thread_entry *next; /* Next thread in a list */ |
151 | }; | 135 | }; |
152 | 136 | ||
153 | #ifndef HAVE_CORELOCK_OBJECT | ||
154 | /* No atomic corelock op needed or just none defined */ | ||
155 | #define corelock_init(cl) | ||
156 | #define corelock_lock(cl) | ||
157 | #define corelock_try_lock(cl) | ||
158 | #define corelock_unlock(cl) | ||
159 | #endif /* HAVE_CORELOCK_OBJECT */ | ||
160 | |||
161 | #ifdef HAVE_PRIORITY_SCHEDULING | 137 | #ifdef HAVE_PRIORITY_SCHEDULING |
162 | struct blocker | 138 | struct blocker |
163 | { | 139 | { |
@@ -307,6 +283,9 @@ struct core_entry | |||
307 | #endif /* NUM_CORES */ | 283 | #endif /* NUM_CORES */ |
308 | }; | 284 | }; |
309 | 285 | ||
286 | extern void yield(void); | ||
287 | extern unsigned sleep(unsigned ticks); | ||
288 | |||
310 | #ifdef HAVE_PRIORITY_SCHEDULING | 289 | #ifdef HAVE_PRIORITY_SCHEDULING |
311 | #define IF_PRIO(...) __VA_ARGS__ | 290 | #define IF_PRIO(...) __VA_ARGS__ |
312 | #define IFN_PRIO(...) | 291 | #define IFN_PRIO(...) |
diff --git a/firmware/kernel/include/tick.h b/firmware/kernel/include/tick.h new file mode 100644 index 0000000000..9810f4a1e5 --- /dev/null +++ b/firmware/kernel/include/tick.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | #ifndef TICK_H | ||
22 | #define TICK_H | ||
23 | |||
24 | #include "config.h" | ||
25 | #include "system.h" /* for NULL */ | ||
26 | extern void init_tick(void); | ||
27 | |||
28 | #define HZ 100 /* number of ticks per second */ | ||
29 | |||
30 | #define MAX_NUM_TICK_TASKS 8 | ||
31 | |||
32 | /* global tick variable */ | ||
33 | #if defined(CPU_PP) && defined(BOOTLOADER) && \ | ||
34 | !defined(HAVE_BOOTLOADER_USB_MODE) | ||
35 | /* We don't enable interrupts in the PP bootloader unless USB mode is | ||
36 | enabled for it, so we need to fake the current_tick variable */ | ||
37 | #define current_tick (signed)(USEC_TIMER/10000) | ||
38 | |||
39 | static inline void call_tick_tasks(void) | ||
40 | { | ||
41 | } | ||
42 | #else | ||
43 | extern volatile long current_tick; | ||
44 | |||
45 | /* inline helper for implementing target interrupt handler */ | ||
46 | static inline void call_tick_tasks(void) | ||
47 | { | ||
48 | extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void); | ||
49 | void (**p)(void) = tick_funcs; | ||
50 | void (*fn)(void); | ||
51 | |||
52 | current_tick++; | ||
53 | |||
54 | for(fn = *p; fn != NULL; fn = *(++p)) | ||
55 | { | ||
56 | fn(); | ||
57 | } | ||
58 | } | ||
59 | #endif | ||
60 | |||
61 | /* implemented in target tree */ | ||
62 | extern void tick_start(unsigned int interval_in_ms) INIT_ATTR; | ||
63 | |||
64 | extern int tick_add_task(void (*f)(void)); | ||
65 | extern int tick_remove_task(void (*f)(void)); | ||
66 | |||
67 | #endif /* TICK_H */ | ||
diff --git a/firmware/kernel/include/timeout.h b/firmware/kernel/include/timeout.h new file mode 100644 index 0000000000..0b7c52ba4c --- /dev/null +++ b/firmware/kernel/include/timeout.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | #ifndef TIMEOUT_H | ||
22 | #define TIMEOUT_H | ||
23 | |||
24 | #include "config.h" | ||
25 | |||
26 | struct timeout; | ||
27 | |||
28 | /* timeout callback type | ||
29 | * tmo - pointer to struct timeout associated with event | ||
30 | * return next interval or <= 0 to stop event | ||
31 | */ | ||
32 | #define MAX_NUM_TIMEOUTS 8 | ||
33 | typedef int (* timeout_cb_type)(struct timeout *tmo); | ||
34 | |||
35 | struct timeout | ||
36 | { | ||
37 | timeout_cb_type callback;/* callback - returning <= 0 cancels */ | ||
38 | intptr_t data; /* data passed to callback */ | ||
39 | long expires; /* expiration tick */ | ||
40 | }; | ||
41 | |||
42 | void timeout_register(struct timeout *tmo, timeout_cb_type callback, | ||
43 | int ticks, intptr_t data); | ||
44 | void timeout_cancel(struct timeout *tmo); | ||
45 | |||
46 | #endif /* TIMEOUT_H */ | ||
diff --git a/firmware/kernel/kernel-internal.h b/firmware/kernel/kernel-internal.h new file mode 100644 index 0000000000..51c589ac8f --- /dev/null +++ b/firmware/kernel/kernel-internal.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Ulf Ralberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #ifndef KERNEL_INTERNAL_H | ||
23 | #define KERNEL_INTERNAL_H | ||
24 | |||
25 | #include "config.h" | ||
26 | #include "debug.h" | ||
27 | |||
28 | /* Make this nonzero to enable more elaborate checks on objects */ | ||
29 | #if defined(DEBUG) || defined(SIMULATOR) | ||
30 | #define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim*/ | ||
31 | #else | ||
32 | #define KERNEL_OBJECT_CHECKS 0 | ||
33 | #endif | ||
34 | |||
35 | #if KERNEL_OBJECT_CHECKS | ||
36 | #ifdef SIMULATOR | ||
37 | #include <stdlib.h> | ||
38 | #define KERNEL_ASSERT(exp, msg...) \ | ||
39 | ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } }) | ||
40 | #else | ||
41 | #define KERNEL_ASSERT(exp, msg...) \ | ||
42 | ({ if (!({ exp; })) panicf(msg); }) | ||
43 | #endif | ||
44 | #else | ||
45 | #define KERNEL_ASSERT(exp, msg...) ({}) | ||
46 | #endif | ||
47 | |||
48 | |||
49 | #endif /* KERNEL_INTERNAL_H */ | ||
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c new file mode 100644 index 0000000000..f1e4b3c722 --- /dev/null +++ b/firmware/kernel/mutex.c | |||
@@ -0,0 +1,152 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | |||
23 | /**************************************************************************** | ||
24 | * Simple mutex functions ;) | ||
25 | ****************************************************************************/ | ||
26 | |||
27 | #include <stdbool.h> | ||
28 | #include "config.h" | ||
29 | #include "system.h" | ||
30 | #include "mutex.h" | ||
31 | #include "corelock.h" | ||
32 | #include "thread-internal.h" | ||
33 | #include "kernel-internal.h" | ||
34 | |||
35 | static inline void __attribute__((always_inline)) | ||
36 | mutex_set_thread(struct mutex *mtx, struct thread_entry *td) | ||
37 | { | ||
38 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
39 | mtx->blocker.thread = td; | ||
40 | #else | ||
41 | mtx->thread = td; | ||
42 | #endif | ||
43 | } | ||
44 | |||
45 | static inline struct thread_entry * __attribute__((always_inline)) | ||
46 | mutex_get_thread(volatile struct mutex *mtx) | ||
47 | { | ||
48 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
49 | return mtx->blocker.thread; | ||
50 | #else | ||
51 | return mtx->thread; | ||
52 | #endif | ||
53 | } | ||
54 | |||
55 | /* Initialize a mutex object - call before any use and do not call again once | ||
56 | * the object is available to other threads */ | ||
57 | void mutex_init(struct mutex *m) | ||
58 | { | ||
59 | corelock_init(&m->cl); | ||
60 | m->queue = NULL; | ||
61 | m->recursion = 0; | ||
62 | mutex_set_thread(m, NULL); | ||
63 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
64 | m->blocker.priority = PRIORITY_IDLE; | ||
65 | m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer; | ||
66 | m->no_preempt = false; | ||
67 | #endif | ||
68 | } | ||
69 | |||
70 | /* Gain ownership of a mutex object or block until it becomes free */ | ||
71 | void mutex_lock(struct mutex *m) | ||
72 | { | ||
73 | struct thread_entry *current = thread_self_entry(); | ||
74 | |||
75 | if(current == mutex_get_thread(m)) | ||
76 | { | ||
77 | /* current thread already owns this mutex */ | ||
78 | m->recursion++; | ||
79 | return; | ||
80 | } | ||
81 | |||
82 | /* lock out other cores */ | ||
83 | corelock_lock(&m->cl); | ||
84 | |||
85 | /* must read thread again inside cs (a multiprocessor concern really) */ | ||
86 | if(LIKELY(mutex_get_thread(m) == NULL)) | ||
87 | { | ||
88 | /* lock is open */ | ||
89 | mutex_set_thread(m, current); | ||
90 | corelock_unlock(&m->cl); | ||
91 | return; | ||
92 | } | ||
93 | |||
94 | /* block until the lock is open... */ | ||
95 | IF_COP( current->obj_cl = &m->cl; ) | ||
96 | IF_PRIO( current->blocker = &m->blocker; ) | ||
97 | current->bqp = &m->queue; | ||
98 | |||
99 | disable_irq(); | ||
100 | block_thread(current); | ||
101 | |||
102 | corelock_unlock(&m->cl); | ||
103 | |||
104 | /* ...and turn control over to next thread */ | ||
105 | switch_thread(); | ||
106 | } | ||
107 | |||
108 | /* Release ownership of a mutex object - only owning thread must call this */ | ||
109 | void mutex_unlock(struct mutex *m) | ||
110 | { | ||
111 | /* unlocker not being the owner is an unlocking violation */ | ||
112 | KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(), | ||
113 | "mutex_unlock->wrong thread (%s != %s)\n", | ||
114 | mutex_get_thread(m)->name, | ||
115 | thread_self_entry()->name); | ||
116 | |||
117 | if(m->recursion > 0) | ||
118 | { | ||
119 | /* this thread still owns lock */ | ||
120 | m->recursion--; | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | /* lock out other cores */ | ||
125 | corelock_lock(&m->cl); | ||
126 | |||
127 | /* transfer to next queued thread if any */ | ||
128 | if(LIKELY(m->queue == NULL)) | ||
129 | { | ||
130 | /* no threads waiting - open the lock */ | ||
131 | mutex_set_thread(m, NULL); | ||
132 | corelock_unlock(&m->cl); | ||
133 | return; | ||
134 | } | ||
135 | else | ||
136 | { | ||
137 | const int oldlevel = disable_irq_save(); | ||
138 | /* Transfer of owning thread is handled in the wakeup protocol | ||
139 | * if priorities are enabled otherwise just set it from the | ||
140 | * queue head. */ | ||
141 | IFN_PRIO( mutex_set_thread(m, m->queue); ) | ||
142 | IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue); | ||
143 | restore_irq(oldlevel); | ||
144 | |||
145 | corelock_unlock(&m->cl); | ||
146 | |||
147 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
148 | if((result & THREAD_SWITCH) && !m->no_preempt) | ||
149 | switch_thread(); | ||
150 | #endif | ||
151 | } | ||
152 | } | ||
diff --git a/firmware/kernel.c b/firmware/kernel/queue.c index a264ceb9e4..379e3f62c8 100644 --- a/firmware/kernel.c +++ b/firmware/kernel/queue.c | |||
@@ -18,51 +18,16 @@ | |||
18 | * KIND, either express or implied. | 18 | * KIND, either express or implied. |
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | #include <stdlib.h> | 21 | |
22 | #include <string.h> | 22 | #include <string.h> |
23 | #include "config.h" | 23 | #include "config.h" |
24 | #include "kernel.h" | 24 | #include "kernel.h" |
25 | #include "thread.h" | ||
26 | #include "cpu.h" | ||
27 | #include "system.h" | 25 | #include "system.h" |
28 | #include "panic.h" | 26 | #include "queue.h" |
29 | #include "debug.h" | 27 | #include "corelock.h" |
28 | #include "kernel-internal.h" | ||
30 | #include "general.h" | 29 | #include "general.h" |
31 | 30 | #include "panic.h" | |
32 | /* Make this nonzero to enable more elaborate checks on objects */ | ||
33 | #if defined(DEBUG) || defined(SIMULATOR) | ||
34 | #define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim*/ | ||
35 | #else | ||
36 | #define KERNEL_OBJECT_CHECKS 0 | ||
37 | #endif | ||
38 | |||
39 | #if KERNEL_OBJECT_CHECKS | ||
40 | #ifdef SIMULATOR | ||
41 | #define KERNEL_ASSERT(exp, msg...) \ | ||
42 | ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } }) | ||
43 | #else | ||
44 | #define KERNEL_ASSERT(exp, msg...) \ | ||
45 | ({ if (!({ exp; })) panicf(msg); }) | ||
46 | #endif | ||
47 | #else | ||
48 | #define KERNEL_ASSERT(exp, msg...) ({}) | ||
49 | #endif | ||
50 | |||
51 | #if !defined(CPU_PP) || !defined(BOOTLOADER) || \ | ||
52 | defined(HAVE_BOOTLOADER_USB_MODE) | ||
53 | volatile long current_tick SHAREDDATA_ATTR = 0; | ||
54 | #endif | ||
55 | |||
56 | /* Unless otherwise defined, do nothing */ | ||
57 | #ifndef YIELD_KERNEL_HOOK | ||
58 | #define YIELD_KERNEL_HOOK() false | ||
59 | #endif | ||
60 | #ifndef SLEEP_KERNEL_HOOK | ||
61 | #define SLEEP_KERNEL_HOOK(ticks) false | ||
62 | #endif | ||
63 | |||
64 | /* List of tick tasks - final element always NULL for termination */ | ||
65 | void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void); | ||
66 | 31 | ||
67 | /* This array holds all queues that are initiated. It is used for broadcast. */ | 32 | /* This array holds all queues that are initiated. It is used for broadcast. */ |
68 | static struct | 33 | static struct |
@@ -74,194 +39,6 @@ static struct | |||
74 | } all_queues SHAREDBSS_ATTR; | 39 | } all_queues SHAREDBSS_ATTR; |
75 | 40 | ||
76 | /**************************************************************************** | 41 | /**************************************************************************** |
77 | * Standard kernel stuff | ||
78 | ****************************************************************************/ | ||
79 | void kernel_init(void) | ||
80 | { | ||
81 | /* Init the threading API */ | ||
82 | init_threads(); | ||
83 | |||
84 | /* Other processors will not reach this point in a multicore build. | ||
85 | * In a single-core build with multiple cores they fall-through and | ||
86 | * sleep in cop_main without returning. */ | ||
87 | if (CURRENT_CORE == CPU) | ||
88 | { | ||
89 | memset(tick_funcs, 0, sizeof(tick_funcs)); | ||
90 | memset(&all_queues, 0, sizeof(all_queues)); | ||
91 | corelock_init(&all_queues.cl); | ||
92 | tick_start(1000/HZ); | ||
93 | #ifdef KDEV_INIT | ||
94 | kernel_device_init(); | ||
95 | #endif | ||
96 | } | ||
97 | } | ||
98 | |||
99 | /**************************************************************************** | ||
100 | * Timer tick - Timer initialization and interrupt handler is defined at | ||
101 | * the target level. | ||
102 | ****************************************************************************/ | ||
103 | int tick_add_task(void (*f)(void)) | ||
104 | { | ||
105 | int oldlevel = disable_irq_save(); | ||
106 | void **arr = (void **)tick_funcs; | ||
107 | void **p = find_array_ptr(arr, f); | ||
108 | |||
109 | /* Add a task if there is room */ | ||
110 | if(p - arr < MAX_NUM_TICK_TASKS) | ||
111 | { | ||
112 | *p = f; /* If already in list, no problem. */ | ||
113 | } | ||
114 | else | ||
115 | { | ||
116 | panicf("Error! tick_add_task(): out of tasks"); | ||
117 | } | ||
118 | |||
119 | restore_irq(oldlevel); | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | int tick_remove_task(void (*f)(void)) | ||
124 | { | ||
125 | int oldlevel = disable_irq_save(); | ||
126 | int rc = remove_array_ptr((void **)tick_funcs, f); | ||
127 | restore_irq(oldlevel); | ||
128 | return rc; | ||
129 | } | ||
130 | |||
131 | /**************************************************************************** | ||
132 | * Tick-based interval timers/one-shots - be mindful this is not really | ||
133 | * intended for continuous timers but for events that need to run for a short | ||
134 | * time and be cancelled without further software intervention. | ||
135 | ****************************************************************************/ | ||
136 | #ifdef INCLUDE_TIMEOUT_API | ||
137 | /* list of active timeout events */ | ||
138 | static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1]; | ||
139 | |||
140 | /* timeout tick task - calls event handlers when they expire | ||
141 | * Event handlers may alter expiration, callback and data during operation. | ||
142 | */ | ||
143 | static void timeout_tick(void) | ||
144 | { | ||
145 | unsigned long tick = current_tick; | ||
146 | struct timeout **p = tmo_list; | ||
147 | struct timeout *curr; | ||
148 | |||
149 | for(curr = *p; curr != NULL; curr = *(++p)) | ||
150 | { | ||
151 | int ticks; | ||
152 | |||
153 | if(TIME_BEFORE(tick, curr->expires)) | ||
154 | continue; | ||
155 | |||
156 | /* this event has expired - call callback */ | ||
157 | ticks = curr->callback(curr); | ||
158 | if(ticks > 0) | ||
159 | { | ||
160 | curr->expires = tick + ticks; /* reload */ | ||
161 | } | ||
162 | else | ||
163 | { | ||
164 | timeout_cancel(curr); /* cancel */ | ||
165 | } | ||
166 | } | ||
167 | } | ||
168 | |||
169 | /* Cancels a timeout callback - can be called from the ISR */ | ||
170 | void timeout_cancel(struct timeout *tmo) | ||
171 | { | ||
172 | int oldlevel = disable_irq_save(); | ||
173 | int rc = remove_array_ptr((void **)tmo_list, tmo); | ||
174 | |||
175 | if(rc >= 0 && *tmo_list == NULL) | ||
176 | { | ||
177 | tick_remove_task(timeout_tick); /* Last one - remove task */ | ||
178 | } | ||
179 | |||
180 | restore_irq(oldlevel); | ||
181 | } | ||
182 | |||
183 | /* Adds a timeout callback - calling with an active timeout resets the | ||
184 | interval - can be called from the ISR */ | ||
185 | void timeout_register(struct timeout *tmo, timeout_cb_type callback, | ||
186 | int ticks, intptr_t data) | ||
187 | { | ||
188 | int oldlevel; | ||
189 | void **arr, **p; | ||
190 | |||
191 | if(tmo == NULL) | ||
192 | return; | ||
193 | |||
194 | oldlevel = disable_irq_save(); | ||
195 | |||
196 | /* See if this one is already registered */ | ||
197 | arr = (void **)tmo_list; | ||
198 | p = find_array_ptr(arr, tmo); | ||
199 | |||
200 | if(p - arr < MAX_NUM_TIMEOUTS) | ||
201 | { | ||
202 | /* Vacancy */ | ||
203 | if(*p == NULL) | ||
204 | { | ||
205 | /* Not present */ | ||
206 | if(*tmo_list == NULL) | ||
207 | { | ||
208 | tick_add_task(timeout_tick); /* First one - add task */ | ||
209 | } | ||
210 | |||
211 | *p = tmo; | ||
212 | } | ||
213 | |||
214 | tmo->callback = callback; | ||
215 | tmo->data = data; | ||
216 | tmo->expires = current_tick + ticks; | ||
217 | } | ||
218 | |||
219 | restore_irq(oldlevel); | ||
220 | } | ||
221 | |||
222 | #endif /* INCLUDE_TIMEOUT_API */ | ||
223 | |||
224 | /**************************************************************************** | ||
225 | * Thread stuff | ||
226 | ****************************************************************************/ | ||
227 | |||
228 | /* Suspends a thread's execution for at least the specified number of ticks. | ||
229 | * May result in CPU core entering wait-for-interrupt mode if no other thread | ||
230 | * may be scheduled. | ||
231 | * | ||
232 | * NOTE: sleep(0) sleeps until the end of the current tick | ||
233 | * sleep(n) that doesn't result in rescheduling: | ||
234 | * n <= ticks suspended < n + 1 | ||
235 | * n to n+1 is a lower bound. Other factors may affect the actual time | ||
236 | * a thread is suspended before it runs again. | ||
237 | */ | ||
238 | unsigned sleep(unsigned ticks) | ||
239 | { | ||
240 | /* In certain situations, certain bootloaders in particular, a normal | ||
241 | * threading call is inappropriate. */ | ||
242 | if (SLEEP_KERNEL_HOOK(ticks)) | ||
243 | return 0; /* Handled */ | ||
244 | |||
245 | disable_irq(); | ||
246 | sleep_thread(ticks); | ||
247 | switch_thread(); | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | /* Elects another thread to run or, if no other thread may be made ready to | ||
252 | * run, immediately returns control back to the calling thread. | ||
253 | */ | ||
254 | void yield(void) | ||
255 | { | ||
256 | /* In certain situations, certain bootloaders in particular, a normal | ||
257 | * threading call is inappropriate. */ | ||
258 | if (YIELD_KERNEL_HOOK()) | ||
259 | return; /* handled */ | ||
260 | |||
261 | switch_thread(); | ||
262 | } | ||
263 | |||
264 | /**************************************************************************** | ||
265 | * Queue handling stuff | 42 | * Queue handling stuff |
266 | ****************************************************************************/ | 43 | ****************************************************************************/ |
267 | 44 | ||
@@ -1003,237 +780,7 @@ int queue_broadcast(long id, intptr_t data) | |||
1003 | return p - all_queues.queues; | 780 | return p - all_queues.queues; |
1004 | } | 781 | } |
1005 | 782 | ||
1006 | /**************************************************************************** | 783 | void init_queues(void) |
1007 | * Simple mutex functions ;) | ||
1008 | ****************************************************************************/ | ||
1009 | |||
1010 | static inline void __attribute__((always_inline)) | ||
1011 | mutex_set_thread(struct mutex *mtx, struct thread_entry *td) | ||
1012 | { | ||
1013 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1014 | mtx->blocker.thread = td; | ||
1015 | #else | ||
1016 | mtx->thread = td; | ||
1017 | #endif | ||
1018 | } | ||
1019 | |||
1020 | static inline struct thread_entry * __attribute__((always_inline)) | ||
1021 | mutex_get_thread(volatile struct mutex *mtx) | ||
1022 | { | ||
1023 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1024 | return mtx->blocker.thread; | ||
1025 | #else | ||
1026 | return mtx->thread; | ||
1027 | #endif | ||
1028 | } | ||
1029 | |||
1030 | /* Initialize a mutex object - call before any use and do not call again once | ||
1031 | * the object is available to other threads */ | ||
1032 | void mutex_init(struct mutex *m) | ||
1033 | { | ||
1034 | corelock_init(&m->cl); | ||
1035 | m->queue = NULL; | ||
1036 | m->recursion = 0; | ||
1037 | mutex_set_thread(m, NULL); | ||
1038 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1039 | m->blocker.priority = PRIORITY_IDLE; | ||
1040 | m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer; | ||
1041 | m->no_preempt = false; | ||
1042 | #endif | ||
1043 | } | ||
1044 | |||
1045 | /* Gain ownership of a mutex object or block until it becomes free */ | ||
1046 | void mutex_lock(struct mutex *m) | ||
1047 | { | ||
1048 | struct thread_entry *current = thread_self_entry(); | ||
1049 | |||
1050 | if(current == mutex_get_thread(m)) | ||
1051 | { | ||
1052 | /* current thread already owns this mutex */ | ||
1053 | m->recursion++; | ||
1054 | return; | ||
1055 | } | ||
1056 | |||
1057 | /* lock out other cores */ | ||
1058 | corelock_lock(&m->cl); | ||
1059 | |||
1060 | /* must read thread again inside cs (a multiprocessor concern really) */ | ||
1061 | if(LIKELY(mutex_get_thread(m) == NULL)) | ||
1062 | { | ||
1063 | /* lock is open */ | ||
1064 | mutex_set_thread(m, current); | ||
1065 | corelock_unlock(&m->cl); | ||
1066 | return; | ||
1067 | } | ||
1068 | |||
1069 | /* block until the lock is open... */ | ||
1070 | IF_COP( current->obj_cl = &m->cl; ) | ||
1071 | IF_PRIO( current->blocker = &m->blocker; ) | ||
1072 | current->bqp = &m->queue; | ||
1073 | |||
1074 | disable_irq(); | ||
1075 | block_thread(current); | ||
1076 | |||
1077 | corelock_unlock(&m->cl); | ||
1078 | |||
1079 | /* ...and turn control over to next thread */ | ||
1080 | switch_thread(); | ||
1081 | } | ||
1082 | |||
1083 | /* Release ownership of a mutex object - only owning thread must call this */ | ||
1084 | void mutex_unlock(struct mutex *m) | ||
1085 | { | ||
1086 | /* unlocker not being the owner is an unlocking violation */ | ||
1087 | KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(), | ||
1088 | "mutex_unlock->wrong thread (%s != %s)\n", | ||
1089 | mutex_get_thread(m)->name, | ||
1090 | thread_self_entry()->name); | ||
1091 | |||
1092 | if(m->recursion > 0) | ||
1093 | { | ||
1094 | /* this thread still owns lock */ | ||
1095 | m->recursion--; | ||
1096 | return; | ||
1097 | } | ||
1098 | |||
1099 | /* lock out other cores */ | ||
1100 | corelock_lock(&m->cl); | ||
1101 | |||
1102 | /* transfer to next queued thread if any */ | ||
1103 | if(LIKELY(m->queue == NULL)) | ||
1104 | { | ||
1105 | /* no threads waiting - open the lock */ | ||
1106 | mutex_set_thread(m, NULL); | ||
1107 | corelock_unlock(&m->cl); | ||
1108 | return; | ||
1109 | } | ||
1110 | else | ||
1111 | { | ||
1112 | const int oldlevel = disable_irq_save(); | ||
1113 | /* Tranfer of owning thread is handled in the wakeup protocol | ||
1114 | * if priorities are enabled otherwise just set it from the | ||
1115 | * queue head. */ | ||
1116 | IFN_PRIO( mutex_set_thread(m, m->queue); ) | ||
1117 | IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue); | ||
1118 | restore_irq(oldlevel); | ||
1119 | |||
1120 | corelock_unlock(&m->cl); | ||
1121 | |||
1122 | #ifdef HAVE_PRIORITY_SCHEDULING | ||
1123 | if((result & THREAD_SWITCH) && !m->no_preempt) | ||
1124 | switch_thread(); | ||
1125 | #endif | ||
1126 | } | ||
1127 | } | ||
1128 | |||
1129 | /**************************************************************************** | ||
1130 | * Simple semaphore functions ;) | ||
1131 | ****************************************************************************/ | ||
1132 | #ifdef HAVE_SEMAPHORE_OBJECTS | ||
1133 | /* Initialize the semaphore object. | ||
1134 | * max = maximum up count the semaphore may assume (max >= 1) | ||
1135 | * start = initial count of semaphore (0 <= count <= max) */ | ||
1136 | void semaphore_init(struct semaphore *s, int max, int start) | ||
1137 | { | ||
1138 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, | ||
1139 | "semaphore_init->inv arg\n"); | ||
1140 | s->queue = NULL; | ||
1141 | s->max = max; | ||
1142 | s->count = start; | ||
1143 | corelock_init(&s->cl); | ||
1144 | } | ||
1145 | |||
1146 | /* Down the semaphore's count or wait for 'timeout' ticks for it to go up if | ||
1147 | * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may | ||
1148 | * safely be used in an ISR. */ | ||
1149 | int semaphore_wait(struct semaphore *s, int timeout) | ||
1150 | { | 784 | { |
1151 | int ret; | 785 | corelock_init(&all_queues.cl); |
1152 | int oldlevel; | ||
1153 | int count; | ||
1154 | |||
1155 | oldlevel = disable_irq_save(); | ||
1156 | corelock_lock(&s->cl); | ||
1157 | |||
1158 | count = s->count; | ||
1159 | |||
1160 | if(LIKELY(count > 0)) | ||
1161 | { | ||
1162 | /* count is not zero; down it */ | ||
1163 | s->count = count - 1; | ||
1164 | ret = OBJ_WAIT_SUCCEEDED; | ||
1165 | } | ||
1166 | else if(timeout == 0) | ||
1167 | { | ||
1168 | /* just polling it */ | ||
1169 | ret = OBJ_WAIT_TIMEDOUT; | ||
1170 | } | ||
1171 | else | ||
1172 | { | ||
1173 | /* too many waits - block until count is upped... */ | ||
1174 | struct thread_entry * current = thread_self_entry(); | ||
1175 | IF_COP( current->obj_cl = &s->cl; ) | ||
1176 | current->bqp = &s->queue; | ||
1177 | /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was | ||
1178 | * explicit in semaphore_release */ | ||
1179 | current->retval = OBJ_WAIT_TIMEDOUT; | ||
1180 | |||
1181 | if(timeout > 0) | ||
1182 | block_thread_w_tmo(current, timeout); /* ...or timed out... */ | ||
1183 | else | ||
1184 | block_thread(current); /* -timeout = infinite */ | ||
1185 | |||
1186 | corelock_unlock(&s->cl); | ||
1187 | |||
1188 | /* ...and turn control over to next thread */ | ||
1189 | switch_thread(); | ||
1190 | |||
1191 | return current->retval; | ||
1192 | } | ||
1193 | |||
1194 | corelock_unlock(&s->cl); | ||
1195 | restore_irq(oldlevel); | ||
1196 | |||
1197 | return ret; | ||
1198 | } | ||
1199 | |||
1200 | /* Up the semaphore's count and release any thread waiting at the head of the | ||
1201 | * queue. The count is saturated to the value of the 'max' parameter specified | ||
1202 | * in 'semaphore_init'. */ | ||
1203 | void semaphore_release(struct semaphore *s) | ||
1204 | { | ||
1205 | unsigned int result = THREAD_NONE; | ||
1206 | int oldlevel; | ||
1207 | |||
1208 | oldlevel = disable_irq_save(); | ||
1209 | corelock_lock(&s->cl); | ||
1210 | |||
1211 | if(LIKELY(s->queue != NULL)) | ||
1212 | { | ||
1213 | /* a thread was queued - wake it up and keep count at 0 */ | ||
1214 | KERNEL_ASSERT(s->count == 0, | ||
1215 | "semaphore_release->threads queued but count=%d!\n", s->count); | ||
1216 | s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ | ||
1217 | result = wakeup_thread(&s->queue); | ||
1218 | } | ||
1219 | else | ||
1220 | { | ||
1221 | int count = s->count; | ||
1222 | if(count < s->max) | ||
1223 | { | ||
1224 | /* nothing waiting - up it */ | ||
1225 | s->count = count + 1; | ||
1226 | } | ||
1227 | } | ||
1228 | |||
1229 | corelock_unlock(&s->cl); | ||
1230 | restore_irq(oldlevel); | ||
1231 | |||
1232 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context) | ||
1233 | /* No thread switch if not thread context */ | ||
1234 | if((result & THREAD_SWITCH) && is_thread_context()) | ||
1235 | switch_thread(); | ||
1236 | #endif | ||
1237 | (void)result; | ||
1238 | } | 786 | } |
1239 | #endif /* HAVE_SEMAPHORE_OBJECTS */ | ||
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c new file mode 100644 index 0000000000..f9ff0ad987 --- /dev/null +++ b/firmware/kernel/semaphore.c | |||
@@ -0,0 +1,142 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | |||
23 | /**************************************************************************** | ||
24 | * Simple mutex functions ;) | ||
25 | ****************************************************************************/ | ||
26 | |||
27 | #include <stdbool.h> | ||
28 | #include "config.h" | ||
29 | #include "kernel.h" | ||
30 | #include "semaphore.h" | ||
31 | #include "kernel-internal.h" | ||
32 | #include "thread-internal.h" | ||
33 | |||
34 | /**************************************************************************** | ||
35 | * Simple semaphore functions ;) | ||
36 | ****************************************************************************/ | ||
37 | /* Initialize the semaphore object. | ||
38 | * max = maximum up count the semaphore may assume (max >= 1) | ||
39 | * start = initial count of semaphore (0 <= count <= max) */ | ||
40 | void semaphore_init(struct semaphore *s, int max, int start) | ||
41 | { | ||
42 | KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, | ||
43 | "semaphore_init->inv arg\n"); | ||
44 | s->queue = NULL; | ||
45 | s->max = max; | ||
46 | s->count = start; | ||
47 | corelock_init(&s->cl); | ||
48 | } | ||
49 | |||
50 | /* Down the semaphore's count or wait for 'timeout' ticks for it to go up if | ||
51 | * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may | ||
52 | * safely be used in an ISR. */ | ||
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it - no blocking needed */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it - TIMEOUT_NOBLOCK, safe from an ISR */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; ) /* let other core find our lock */
        current->bqp = &s->queue;           /* enqueue on semaphore's list */
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current); /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        /* NOTE: the saved IRQ level is intentionally not restored on this
         * path - interrupts come back as part of the context switch (the
         * same pattern sleep() uses: disable_irq(); ...; switch_thread()) */
        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
103 | |||
104 | /* Up the semaphore's count and release any thread waiting at the head of the | ||
105 | * queue. The count is saturated to the value of the 'max' parameter specified | ||
106 | * in 'semaphore_init'. */ | ||
107 | void semaphore_release(struct semaphore *s) | ||
108 | { | ||
109 | unsigned int result = THREAD_NONE; | ||
110 | int oldlevel; | ||
111 | |||
112 | oldlevel = disable_irq_save(); | ||
113 | corelock_lock(&s->cl); | ||
114 | |||
115 | if(LIKELY(s->queue != NULL)) | ||
116 | { | ||
117 | /* a thread was queued - wake it up and keep count at 0 */ | ||
118 | KERNEL_ASSERT(s->count == 0, | ||
119 | "semaphore_release->threads queued but count=%d!\n", s->count); | ||
120 | s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ | ||
121 | result = wakeup_thread(&s->queue); | ||
122 | } | ||
123 | else | ||
124 | { | ||
125 | int count = s->count; | ||
126 | if(count < s->max) | ||
127 | { | ||
128 | /* nothing waiting - up it */ | ||
129 | s->count = count + 1; | ||
130 | } | ||
131 | } | ||
132 | |||
133 | corelock_unlock(&s->cl); | ||
134 | restore_irq(oldlevel); | ||
135 | |||
136 | #if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context) | ||
137 | /* No thread switch if not thread context */ | ||
138 | if((result & THREAD_SWITCH) && is_thread_context()) | ||
139 | switch_thread(); | ||
140 | #endif | ||
141 | (void)result; | ||
142 | } | ||
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h new file mode 100644 index 0000000000..c2acdfbaa9 --- /dev/null +++ b/firmware/kernel/thread-internal.h | |||
@@ -0,0 +1,357 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Ulf Ralberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #ifndef THREAD_H | ||
23 | #define THREAD_H | ||
24 | |||
25 | #include "config.h" | ||
26 | #include <inttypes.h> | ||
27 | #include <stddef.h> | ||
28 | #include <stdbool.h> | ||
29 | #include "gcc_extensions.h" | ||
30 | |||
/*
 * We need more stack when we run under a host
 * maybe more expensive C lib functions?
 *
 * simulator (possibly) doesn't simulate stack usage anyway but well ... */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
/* Hosted build: "register context" is just the handles the SDL thread shim
 * needs, not real CPU registers. */
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS || __PCTOOL__ */

#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES]; /* per-core "I want the lock" flag */
    volatile unsigned char turn;           /* tie-breaker when both want it */
} __attribute__((packed));

/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};

#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
 * an alternate STATE_* won't work since that would interfere with operation
 * while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
/* NOTE(review): this expands to nothing, so single-core code must never use
 * the result of corelock_try_lock() in an expression - confirm callers. */
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
109 | |||
#ifdef HAVE_PRIORITY_SCHEDULING
/* Describes the object a thread is blocked on, for priority inheritance */
struct blocker
{
    struct thread_entry * volatile thread; /* thread blocking other threads
                                              (aka. object owner) */
    int priority;                          /* highest priority waiter */
    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};

/* Choices of wakeup protocol */

/* For transfer of object ownership by one thread to another thread by
 * the owning thread itself (mutexes) */
struct thread_entry *
    wakeup_priority_protocol_transfer(struct thread_entry *thread);

/* For release by owner where ownership doesn't change - other threads,
 * interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
    wakeup_priority_protocol_release(struct thread_entry *thread);


struct priority_distribution
{
    uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    uint32_t mask;          /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                performs special steps needed when being
                                forced off of an object's wait queue that
                                go beyond the standard wait queue removal
                                and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
    defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
    NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                    creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority; /* Storage I/O priority of this thread */
#endif
};
211 | |||
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| - the version byte is bumped on
   slot reuse so stale IDs held by callers fail to match */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))

#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};
258 | |||
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)    __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...)   __VA_ARGS__
#endif

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));

/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
#define THREAD_OK 0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif

/* Return the id of the calling thread. */
unsigned int thread_self(void);

/* Return the thread_entry for the calling thread.
 * INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);

/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
357 | #endif /* THREAD_H */ | ||
diff --git a/firmware/thread.c b/firmware/kernel/thread.c index b687144f4f..43ff584a68 100644 --- a/firmware/thread.c +++ b/firmware/kernel/thread.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #endif | 41 | #endif |
42 | #include "core_alloc.h" | 42 | #include "core_alloc.h" |
43 | #include "gcc_extensions.h" | 43 | #include "gcc_extensions.h" |
44 | #include "corelock.h" | ||
44 | 45 | ||
45 | /**************************************************************************** | 46 | /**************************************************************************** |
46 | * ATTENTION!! * | 47 | * ATTENTION!! * |
@@ -2390,3 +2391,52 @@ void thread_get_name(char *buffer, int size, | |||
2390 | snprintf(buffer, size, fmt, name); | 2391 | snprintf(buffer, size, fmt, name); |
2391 | } | 2392 | } |
2392 | } | 2393 | } |
2394 | |||
/* Unless otherwise defined (by a target/bootloader header included above),
 * the kernel hooks do nothing and report "not handled" so the normal
 * scheduler paths below run. */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
2402 | |||
/*---------------------------------------------------------------------------
 * Suspends a thread's execution for at least the specified number of ticks.
 *
 * May result in CPU core entering wait-for-interrupt mode if no other thread
 * may be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick
 *       sleep(n) that doesn't result in rescheduling:
 *                n <= ticks suspended < n + 1
 *       n to n+1 is a lower bound. Other factors may affect the actual time
 *       a thread is suspended before it runs again.
 *---------------------------------------------------------------------------
 */
unsigned sleep(unsigned ticks)
{
    /* Certain situations - certain bootloaders in particular - cannot take
     * a normal threading call; the hook handles those and we are done. */
    if (!SLEEP_KERNEL_HOOK(ticks))
    {
        /* Normal path: park on the timeout list with IRQs off, then hand
         * the CPU to the next runnable thread. */
        disable_irq();
        sleep_thread(ticks);
        switch_thread();
    }

    return 0;
}
2428 | |||
/*---------------------------------------------------------------------------
 * Elects another thread to run or, if no other thread may be made ready to
 * run, immediately returns control back to the calling thread.
 *---------------------------------------------------------------------------
 */
void yield(void)
{
    /* Certain situations - certain bootloaders in particular - cannot take
     * a normal threading call; the hook handles those and we are done. */
    if (!YIELD_KERNEL_HOOK())
        switch_thread();
}
diff --git a/firmware/kernel/tick.c b/firmware/kernel/tick.c new file mode 100644 index 0000000000..c524560687 --- /dev/null +++ b/firmware/kernel/tick.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2002 by Björn Stenberg | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | |||
22 | #include "config.h" | ||
23 | #include "tick.h" | ||
24 | #include "general.h" | ||
25 | #include "panic.h" | ||
26 | |||
27 | /**************************************************************************** | ||
28 | * Timer tick | ||
29 | *****************************************************************************/ | ||
30 | |||
31 | |||
/* List of tick tasks - final element always NULL for termination
 * (the +1 slot guarantees the NULL sentinel even when the list is full) */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);

#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
    defined(HAVE_BOOTLOADER_USB_MODE)
/* Global tick counter, advanced by the target's tick interrupt.
 * NOTE(review): on a PP bootloader without USB mode this definition is
 * skipped - presumably the counter is provided elsewhere; confirm against
 * the PP target code. */
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
39 | |||
40 | /* - Timer initialization and interrupt handler is defined at | ||
41 | * the target level: tick_start() is implemented in the target tree */ | ||
42 | |||
43 | int tick_add_task(void (*f)(void)) | ||
44 | { | ||
45 | int oldlevel = disable_irq_save(); | ||
46 | void **arr = (void **)tick_funcs; | ||
47 | void **p = find_array_ptr(arr, f); | ||
48 | |||
49 | /* Add a task if there is room */ | ||
50 | if(p - arr < MAX_NUM_TICK_TASKS) | ||
51 | { | ||
52 | *p = f; /* If already in list, no problem. */ | ||
53 | } | ||
54 | else | ||
55 | { | ||
56 | panicf("Error! tick_add_task(): out of tasks"); | ||
57 | } | ||
58 | |||
59 | restore_irq(oldlevel); | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | int tick_remove_task(void (*f)(void)) | ||
64 | { | ||
65 | int oldlevel = disable_irq_save(); | ||
66 | int rc = remove_array_ptr((void **)tick_funcs, f); | ||
67 | restore_irq(oldlevel); | ||
68 | return rc; | ||
69 | } | ||
70 | |||
/* Start the periodic tick interrupt.  tick_start() is implemented in the
 * target tree and takes the tick period in milliseconds. */
void init_tick(void)
{
    tick_start(1000/HZ);
}
diff --git a/firmware/kernel/timeout.c b/firmware/kernel/timeout.c new file mode 100644 index 0000000000..8039e56ffb --- /dev/null +++ b/firmware/kernel/timeout.c | |||
@@ -0,0 +1,97 @@ | |||
1 | |||
2 | /**************************************************************************** | ||
3 | * Tick-based interval timers/one-shots - be mindful this is not really | ||
4 | * intended for continuous timers but for events that need to run for a short | ||
5 | * time and be cancelled without further software intervention. | ||
6 | ****************************************************************************/ | ||
7 | |||
8 | #include "config.h" | ||
9 | #include "system.h" /* TIME_AFTER */ | ||
10 | #include "kernel.h" | ||
11 | #include "timeout.h" | ||
12 | #include "general.h" | ||
13 | |||
/* list of active timeout events - compacted array; the +1 slot guarantees
 * a NULL terminator even when all MAX_NUM_TIMEOUTS entries are in use */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
16 | |||
/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter expiration, callback and data during operation.
 *
 * NOTE(review): timeout_cancel() compacts tmo_list, shifting the following
 * element into the current slot; '*(++p)' then steps past it, so that event
 * is not examined until the next tick.  A one-tick delay - confirm this is
 * acceptable for all users.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout **p = tmo_list;
    struct timeout *curr;

    for(curr = *p; curr != NULL; curr = *(++p))
    {
        int ticks;

        if(TIME_BEFORE(tick, curr->expires))
            continue; /* not due yet */

        /* this event has expired - call callback */
        ticks = curr->callback(curr);
        if(ticks > 0)
        {
            curr->expires = tick + ticks; /* reload - periodic behavior */
        }
        else
        {
            timeout_cancel(curr); /* cancel - one-shot is done */
        }
    }
}
45 | |||
46 | /* Cancels a timeout callback - can be called from the ISR */ | ||
47 | void timeout_cancel(struct timeout *tmo) | ||
48 | { | ||
49 | int oldlevel = disable_irq_save(); | ||
50 | int rc = remove_array_ptr((void **)tmo_list, tmo); | ||
51 | |||
52 | if(rc >= 0 && *tmo_list == NULL) | ||
53 | { | ||
54 | tick_remove_task(timeout_tick); /* Last one - remove task */ | ||
55 | } | ||
56 | |||
57 | restore_irq(oldlevel); | ||
58 | } | ||
59 | |||
60 | /* Adds a timeout callback - calling with an active timeout resets the | ||
61 | interval - can be called from the ISR */ | ||
62 | void timeout_register(struct timeout *tmo, timeout_cb_type callback, | ||
63 | int ticks, intptr_t data) | ||
64 | { | ||
65 | int oldlevel; | ||
66 | void **arr, **p; | ||
67 | |||
68 | if(tmo == NULL) | ||
69 | return; | ||
70 | |||
71 | oldlevel = disable_irq_save(); | ||
72 | |||
73 | /* See if this one is already registered */ | ||
74 | arr = (void **)tmo_list; | ||
75 | p = find_array_ptr(arr, tmo); | ||
76 | |||
77 | if(p - arr < MAX_NUM_TIMEOUTS) | ||
78 | { | ||
79 | /* Vacancy */ | ||
80 | if(*p == NULL) | ||
81 | { | ||
82 | /* Not present */ | ||
83 | if(*tmo_list == NULL) | ||
84 | { | ||
85 | tick_add_task(timeout_tick); /* First one - add task */ | ||
86 | } | ||
87 | |||
88 | *p = tmo; | ||
89 | } | ||
90 | |||
91 | tmo->callback = callback; | ||
92 | tmo->data = data; | ||
93 | tmo->expires = current_tick + ticks; | ||
94 | } | ||
95 | |||
96 | restore_irq(oldlevel); | ||
97 | } | ||
diff --git a/firmware/target/arm/pp/debug-pp.c b/firmware/target/arm/pp/debug-pp.c index 2f57e1ef14..9e0dcad5f9 100644 --- a/firmware/target/arm/pp/debug-pp.c +++ b/firmware/target/arm/pp/debug-pp.c | |||
@@ -19,9 +19,10 @@ | |||
19 | * | 19 | * |
20 | ****************************************************************************/ | 20 | ****************************************************************************/ |
21 | 21 | ||
22 | #include <stdbool.h> | ||
22 | #include "config.h" | 23 | #include "config.h" |
23 | #include "system.h" | 24 | #include "system.h" |
24 | #include <stdbool.h> | 25 | #include "kernel.h" |
25 | #include "font.h" | 26 | #include "font.h" |
26 | #include "lcd.h" | 27 | #include "lcd.h" |
27 | #include "button.h" | 28 | #include "button.h" |
diff --git a/firmware/target/arm/pp/thread-pp.c b/firmware/target/arm/pp/thread-pp.c index 0836b27204..ed4bdbeac1 100644 --- a/firmware/target/arm/pp/thread-pp.c +++ b/firmware/target/arm/pp/thread-pp.c | |||
@@ -51,128 +51,6 @@ static uintptr_t * const idle_stacks[NUM_CORES] = | |||
51 | [COP] = cop_idlestackbegin | 51 | [COP] = cop_idlestackbegin |
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* Core locks using Peterson's mutual exclusion algorithm */ | ||
55 | |||
56 | /*--------------------------------------------------------------------------- | ||
57 | * Initialize the corelock structure. | ||
58 | *--------------------------------------------------------------------------- | ||
59 | */ | ||
60 | void corelock_init(struct corelock *cl) | ||
61 | { | ||
62 | memset(cl, 0, sizeof (*cl)); | ||
63 | } | ||
64 | |||
65 | #if 1 /* Assembly locks to minimize overhead */ | ||
66 | /*--------------------------------------------------------------------------- | ||
67 | * Wait for the corelock to become free and acquire it when it does. | ||
68 | *--------------------------------------------------------------------------- | ||
69 | */ | ||
70 | void __attribute__((naked)) corelock_lock(struct corelock *cl) | ||
71 | { | ||
72 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ | ||
73 | asm volatile ( | ||
74 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
75 | "ldrb r1, [r1] \n" | ||
76 | "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */ | ||
77 | "eor r2, r1, #0xff \n" /* r2 = othercore */ | ||
78 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
79 | "1: \n" | ||
80 | "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */ | ||
81 | "cmp r3, #0 \n" /* yes? lock acquired */ | ||
82 | "bxeq lr \n" | ||
83 | "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */ | ||
84 | "cmp r3, r1 \n" | ||
85 | "bxeq lr \n" /* yes? lock acquired */ | ||
86 | "b 1b \n" /* keep trying */ | ||
87 | : : "i"(&PROCESSOR_ID) | ||
88 | ); | ||
89 | (void)cl; | ||
90 | } | ||
91 | |||
92 | /*--------------------------------------------------------------------------- | ||
93 | * Try to aquire the corelock. If free, caller gets it, otherwise return 0. | ||
94 | *--------------------------------------------------------------------------- | ||
95 | */ | ||
96 | int __attribute__((naked)) corelock_try_lock(struct corelock *cl) | ||
97 | { | ||
98 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ | ||
99 | asm volatile ( | ||
100 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
101 | "ldrb r1, [r1] \n" | ||
102 | "mov r3, r0 \n" | ||
103 | "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */ | ||
104 | "eor r2, r1, #0xff \n" /* r2 = othercore */ | ||
105 | "strb r2, [r0, #2] \n" /* cl->turn = othercore */ | ||
106 | "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */ | ||
107 | "eors r0, r0, r2 \n" /* yes? lock acquired */ | ||
108 | "bxne lr \n" | ||
109 | "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */ | ||
110 | "ands r0, r0, r1 \n" | ||
111 | "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */ | ||
112 | "bx lr \n" /* return result */ | ||
113 | : : "i"(&PROCESSOR_ID) | ||
114 | ); | ||
115 | |||
116 | return 0; | ||
117 | (void)cl; | ||
118 | } | ||
119 | |||
120 | /*--------------------------------------------------------------------------- | ||
121 | * Release ownership of the corelock | ||
122 | *--------------------------------------------------------------------------- | ||
123 | */ | ||
124 | void __attribute__((naked)) corelock_unlock(struct corelock *cl) | ||
125 | { | ||
126 | asm volatile ( | ||
127 | "mov r1, %0 \n" /* r1 = PROCESSOR_ID */ | ||
128 | "ldrb r1, [r1] \n" | ||
129 | "mov r2, #0 \n" /* cl->myl[core] = 0 */ | ||
130 | "strb r2, [r0, r1, lsr #7] \n" | ||
131 | "bx lr \n" | ||
132 | : : "i"(&PROCESSOR_ID) | ||
133 | ); | ||
134 | (void)cl; | ||
135 | } | ||
136 | |||
137 | #else /* C versions for reference */ | ||
138 | |||
139 | void corelock_lock(struct corelock *cl) | ||
140 | { | ||
141 | const unsigned int core = CURRENT_CORE; | ||
142 | const unsigned int othercore = 1 - core; | ||
143 | |||
144 | cl->myl[core] = core; | ||
145 | cl->turn = othercore; | ||
146 | |||
147 | for (;;) | ||
148 | { | ||
149 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | int corelock_try_lock(struct corelock *cl) | ||
155 | { | ||
156 | const unsigned int core = CURRENT_CORE; | ||
157 | const unsigned int othercore = 1 - core; | ||
158 | |||
159 | cl->myl[core] = core; | ||
160 | cl->turn = othercore; | ||
161 | |||
162 | if (cl->myl[othercore] == 0 || cl->turn == core) | ||
163 | { | ||
164 | return 1; | ||
165 | } | ||
166 | |||
167 | cl->myl[core] = 0; | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | void corelock_unlock(struct corelock *cl) | ||
172 | { | ||
173 | cl->myl[CURRENT_CORE] = 0; | ||
174 | } | ||
175 | #endif /* ASM / C selection */ | ||
176 | 54 | ||
177 | /*--------------------------------------------------------------------------- | 55 | /*--------------------------------------------------------------------------- |
178 | * Do any device-specific inits for the threads and synchronize the kernel | 56 | * Do any device-specific inits for the threads and synchronize the kernel |
diff --git a/firmware/target/arm/sandisk/sansa-e200/button-e200.c b/firmware/target/arm/sandisk/sansa-e200/button-e200.c index 1e952b3882..73279b44cf 100644 --- a/firmware/target/arm/sandisk/sansa-e200/button-e200.c +++ b/firmware/target/arm/sandisk/sansa-e200/button-e200.c | |||
@@ -22,6 +22,7 @@ | |||
22 | /* Taken from button-h10.c by Barry Wardell and reverse engineering by MrH. */ | 22 | /* Taken from button-h10.c by Barry Wardell and reverse engineering by MrH. */ |
23 | 23 | ||
24 | #include "system.h" | 24 | #include "system.h" |
25 | #include "kernel.h" | ||
25 | #include "button.h" | 26 | #include "button.h" |
26 | #include "backlight.h" | 27 | #include "backlight.h" |
27 | #include "powermgmt.h" | 28 | #include "powermgmt.h" |
diff --git a/firmware/target/arm/sandisk/sansa-e200/lcd-e200.c b/firmware/target/arm/sandisk/sansa-e200/lcd-e200.c index 39ceb9b8e0..c01f65865f 100644 --- a/firmware/target/arm/sandisk/sansa-e200/lcd-e200.c +++ b/firmware/target/arm/sandisk/sansa-e200/lcd-e200.c | |||
@@ -22,8 +22,10 @@ | |||
22 | * KIND, either express or implied. | 22 | * KIND, either express or implied. |
23 | * | 23 | * |
24 | ****************************************************************************/ | 24 | ****************************************************************************/ |
25 | |||
25 | #include "config.h" | 26 | #include "config.h" |
26 | #include "system.h" | 27 | #include "system.h" |
28 | #include "kernel.h" | ||
27 | #include "lcd.h" | 29 | #include "lcd.h" |
28 | #include "lcd-target.h" | 30 | #include "lcd-target.h" |
29 | 31 | ||
diff --git a/firmware/target/hosted/sdl/thread-sdl.c b/firmware/target/hosted/sdl/thread-sdl.c index fbc26c8a9f..c17e793833 100644 --- a/firmware/target/hosted/sdl/thread-sdl.c +++ b/firmware/target/hosted/sdl/thread-sdl.c | |||
@@ -682,3 +682,53 @@ void thread_get_name(char *buffer, int size, | |||
682 | snprintf(buffer, size, fmt, name); | 682 | snprintf(buffer, size, fmt, name); |
683 | } | 683 | } |
684 | } | 684 | } |
685 | |||
686 | /* Unless otherwise defined, do nothing */ | ||
687 | #ifndef YIELD_KERNEL_HOOK | ||
688 | #define YIELD_KERNEL_HOOK() false | ||
689 | #endif | ||
690 | #ifndef SLEEP_KERNEL_HOOK | ||
691 | #define SLEEP_KERNEL_HOOK(ticks) false | ||
692 | #endif | ||
693 | |||
694 | |||
695 | /*--------------------------------------------------------------------------- | ||
696 | * Suspends a thread's execution for at least the specified number of ticks. | ||
697 | * | ||
698 | * May result in CPU core entering wait-for-interrupt mode if no other thread | ||
699 | * may be scheduled. | ||
700 | * | ||
701 | * NOTE: sleep(0) sleeps until the end of the current tick | ||
702 | * sleep(n) that doesn't result in rescheduling: | ||
703 | * n <= ticks suspended < n + 1 | ||
704 | * n to n+1 is a lower bound. Other factors may affect the actual time | ||
705 | * a thread is suspended before it runs again. | ||
706 | *--------------------------------------------------------------------------- | ||
707 | */ | ||
708 | unsigned sleep(unsigned ticks) | ||
709 | { | ||
710 | /* In certain situations, certain bootloaders in particular, a normal | ||
711 | * threading call is inappropriate. */ | ||
712 | if (SLEEP_KERNEL_HOOK(ticks)) | ||
713 | return 0; /* Handled */ | ||
714 | |||
715 | disable_irq(); | ||
716 | sleep_thread(ticks); | ||
717 | switch_thread(); | ||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | /*--------------------------------------------------------------------------- | ||
722 | * Elects another thread to run or, if no other thread may be made ready to | ||
723 | * run, immediately returns control back to the calling thread. | ||
724 | *--------------------------------------------------------------------------- | ||
725 | */ | ||
726 | void yield(void) | ||
727 | { | ||
728 | /* In certain situations, certain bootloaders in particular, a normal | ||
729 | * threading call is inappropriate. */ | ||
730 | if (YIELD_KERNEL_HOOK()) | ||
731 | return; /* handled */ | ||
732 | |||
733 | switch_thread(); | ||
734 | } | ||