path: root/firmware/kernel
author     Thomas Martitz <kugel@rockbox.org>  2013-12-04 17:06:17 +0100
committer  Thomas Martitz <kugel@rockbox.org>  2014-03-03 18:11:57 +0100
commit     382d1861af12741af4ff235b9d18f179c0adc4c5 (patch)
tree       26166c130d2889bb1ae1082e8f7aba103534f49e /firmware/kernel
parent     8bae5f2644b5d5759499fbf1066b9c35c6f859ad (diff)
kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and to use different implementations.

Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
Diffstat (limited to 'firmware/kernel')
-rw-r--r--  firmware/kernel/corelock.c             40
-rw-r--r--  firmware/kernel/include/corelock.h     53
-rw-r--r--  firmware/kernel/include/kernel.h       69
-rw-r--r--  firmware/kernel/include/mutex.h        62
-rw-r--r--  firmware/kernel/include/queue.h       157
-rw-r--r--  firmware/kernel/include/semaphore.h    40
-rw-r--r--  firmware/kernel/include/thread.h      387
-rw-r--r--  firmware/kernel/include/tick.h         67
-rw-r--r--  firmware/kernel/include/timeout.h      46
-rw-r--r--  firmware/kernel/kernel-internal.h      49
-rw-r--r--  firmware/kernel/mutex.c               152
-rw-r--r--  firmware/kernel/queue.c               786
-rw-r--r--  firmware/kernel/semaphore.c           142
-rw-r--r--  firmware/kernel/thread-internal.h     357
-rw-r--r--  firmware/kernel/thread.c             2442
-rw-r--r--  firmware/kernel/tick.c                 74
-rw-r--r--  firmware/kernel/timeout.c              97
17 files changed, 5020 insertions, 0 deletions
diff --git a/firmware/kernel/corelock.c b/firmware/kernel/corelock.c
new file mode 100644
index 0000000000..53d08a9069
--- /dev/null
+++ b/firmware/kernel/corelock.c
@@ -0,0 +1,40 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2007 by Daniel Ankers
11 *
12 * PP5002 and PP502x SoC threading support
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24#include <string.h>
25#include "corelock.h"
26
27/* Core locks using Peterson's mutual exclusion algorithm */
28
29
30/*---------------------------------------------------------------------------
31 * Initialize the corelock structure.
32 *---------------------------------------------------------------------------
33 */
34void corelock_init(struct corelock *cl)
35{
36 memset(cl, 0, sizeof (*cl));
37}
38
39/* other corelock methods are ASM-optimized */
40#include "asm/corelock.c"
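For reference, the lock/unlock pair that the included asm/corelock.c implements follows Peterson's two-process algorithm. The sketch below is an illustrative C rendering only, assuming NUM_CORES == 2 and using hypothetical *_c names; it glosses over the memory barriers and exact values the real hand-written assembly uses. CURRENT_CORE is assumed to come from the system/target headers.

/* Illustrative two-core Peterson lock - not the shipped ASM routines. */
static void corelock_lock_c(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;   /* 0 or 1 */
    const unsigned int other = 1 - core;

    cl->myl[core] = core + 1;                 /* non-zero: this core wants the lock */
    cl->turn = other;                         /* politely let the other core go first */

    while (cl->myl[other] != 0 && cl->turn == other)
        ;                                     /* spin until the other core is done
                                                 or it is our turn */
}

static void corelock_unlock_c(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0;                /* withdraw our claim */
}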
diff --git a/firmware/kernel/include/corelock.h b/firmware/kernel/include/corelock.h
new file mode 100644
index 0000000000..79302e0e3c
--- /dev/null
+++ b/firmware/kernel/include/corelock.h
@@ -0,0 +1,53 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22
23#ifndef CORELOCK_H
24#define CORELOCK_H
25
26#include "config.h"
27
28#ifndef HAVE_CORELOCK_OBJECT
29
30/* No atomic corelock op needed or just none defined */
31#define corelock_init(cl)
32#define corelock_lock(cl)
33#define corelock_try_lock(cl)
34#define corelock_unlock(cl)
35
36#else
37
38/* No reliable atomic instruction available - use Peterson's algorithm */
39struct corelock
40{
41 volatile unsigned char myl[NUM_CORES];
42 volatile unsigned char turn;
43} __attribute__((packed));
44
45/* Too big to inline everywhere */
46extern void corelock_init(struct corelock *cl);
47extern void corelock_lock(struct corelock *cl);
48extern int corelock_try_lock(struct corelock *cl);
49extern void corelock_unlock(struct corelock *cl);
50
51#endif /* HAVE_CORELOCK_OBJECT */
52
53#endif /* CORELOCK_H */
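As a usage sketch (hypothetical counter_* names, not part of the commit): because the macros expand to nothing on single-core builds, shared state can be guarded unconditionally. IF_COP() and SHAREDBSS_ATTR are assumed to be provided by the config/system headers, as they are used elsewhere in this commit.

#include "system.h"     /* IF_COP() and SHAREDBSS_ATTR assumed to live here */
#include "corelock.h"

/* Both the corelock member and the lock calls disappear on single-core
 * builds, so the same source compiles for either configuration. */
static struct
{
    IF_COP( struct corelock cl; )
    volatile long count;
} counter SHAREDBSS_ATTR;

void counter_init(void)
{
    corelock_init(&counter.cl);   /* expands to nothing without HAVE_CORELOCK_OBJECT */
    counter.count = 0;
}

void counter_bump(void)
{
    corelock_lock(&counter.cl);
    counter.count++;
    corelock_unlock(&counter.cl);
}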
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
new file mode 100644
index 0000000000..fafff25ce4
--- /dev/null
+++ b/firmware/kernel/include/kernel.h
@@ -0,0 +1,69 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#ifndef KERNEL_H
22#define KERNEL_H
23
24#include "config.h"
25
26#include "system.h"
27#include "queue.h"
28#include "mutex.h"
29#include "tick.h"
30
31#ifdef INCLUDE_TIMEOUT_API
32#include "timeout.h"
33#endif
34
35#ifdef HAVE_SEMAPHORE_OBJECTS
36#include "semaphore.h"
37#endif
38
39#ifdef HAVE_CORELOCK_OBJECT
40#include "corelock.h"
41#endif
42
43#define OBJ_WAIT_TIMEDOUT (-1)
44#define OBJ_WAIT_FAILED 0
45#define OBJ_WAIT_SUCCEEDED 1
46
47#define TIMEOUT_BLOCK -1
48#define TIMEOUT_NOBLOCK 0
49
50static inline void kernel_init(void)
51{
52 /* Init the threading API */
53 init_threads();
54
55 /* Other processors will not reach this point in a multicore build.
56 * In a single-core build on multicore hardware they fall through and
57 * sleep in cop_main without returning. */
58 if (CURRENT_CORE == CPU)
59 {
60 init_queues();
61 init_tick();
62#ifdef KDEV_INIT
63 kernel_device_init();
64#endif
65 }
66}
67
68
69#endif /* KERNEL_H */
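For illustration, the timeout constants above map onto the tick arguments of the blocking primitives as shown in this hypothetical helper (timeout_conventions is a made-up name):

#include "kernel.h"

void timeout_conventions(struct event_queue *q, struct queue_event *ev)
{
    queue_wait_w_tmo(q, ev, TIMEOUT_BLOCK);   /* -1: wait forever (same as queue_wait) */
    queue_wait_w_tmo(q, ev, TIMEOUT_NOBLOCK); /*  0: poll; ev->id == SYS_TIMEOUT if empty */
    queue_wait_w_tmo(q, ev, HZ / 2);          /* >0: wait at most half a second */
}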
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
new file mode 100644
index 0000000000..bcf5701bd9
--- /dev/null
+++ b/firmware/kernel/include/mutex.h
@@ -0,0 +1,62 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef MUTEX_H
23#define MUTEX_H
24
25#include <stdbool.h>
26#include "config.h"
27#include "thread.h"
28
29struct mutex
30{
31 struct thread_entry *queue; /* waiter list */
32 int recursion; /* lock owner recursion count */
33#ifdef HAVE_PRIORITY_SCHEDULING
34 struct blocker blocker; /* priority inheritance info
35 for waiters */
36 bool no_preempt; /* don't allow higher-priority thread
37 to be scheduled even if woken */
38#else
39 struct thread_entry *thread; /* Indicates owner thread - an owner
40 implies a locked state - same goes
41 for priority scheduling
42 (in blocker struct for that) */
43#endif
44 IF_COP( struct corelock cl; ) /* multiprocessor sync */
45};
46
47extern void mutex_init(struct mutex *m);
48extern void mutex_lock(struct mutex *m);
49extern void mutex_unlock(struct mutex *m);
50#ifdef HAVE_PRIORITY_SCHEDULING
51/* Deprecated temporary function to disable mutex preempting a thread on
52 * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
53 * reliance on it is a bug! */
54static inline void mutex_set_preempt(struct mutex *m, bool preempt)
55 { m->no_preempt = !preempt; }
56#else
57/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
58static inline bool mutex_test(const struct mutex *m)
59 { return m->thread != NULL; }
60#endif /* HAVE_PRIORITY_SCHEDULING */
61
62#endif /* MUTEX_H */
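Typical usage, as a minimal sketch with made-up settings_* names: initialize once before the mutex is visible to other threads, then bracket the critical section; recursive locking by the owner is counted by the recursion field.

#include "kernel.h"

static struct mutex settings_mtx;
static int settings_value;

void settings_init(void)
{
    mutex_init(&settings_mtx);     /* once, before any other thread can use it */
}

void settings_set(int v)
{
    mutex_lock(&settings_mtx);     /* blocks until free; nests if we already own it */
    settings_value = v;
    mutex_unlock(&settings_mtx);   /* must be called by the owning thread */
}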
diff --git a/firmware/kernel/include/queue.h b/firmware/kernel/include/queue.h
new file mode 100644
index 0000000000..1b404f8297
--- /dev/null
+++ b/firmware/kernel/include/queue.h
@@ -0,0 +1,157 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef QUEUE_H
23#define QUEUE_H
24
25#include <stdint.h>
26#include "config.h"
27#include "thread.h"
28
29/* System defined message ID's - |sign bit = 1|class|id| */
30/* Event class list */
31#define SYS_EVENT_CLS_QUEUE 0
32#define SYS_EVENT_CLS_USB 1
33#define SYS_EVENT_CLS_POWER 2
34#define SYS_EVENT_CLS_FILESYS 3
35#define SYS_EVENT_CLS_PLUG 4
36#define SYS_EVENT_CLS_MISC 5
37#define SYS_EVENT_CLS_PRIVATE 7 /* For use inside plugins */
38/* make sure SYS_EVENT_CLS_BITS has enough range */
39
40/* Bit 31->|S|c...c|i...i| */
41#define SYS_EVENT ((long)(int)(1 << 31))
42#define SYS_EVENT_CLS_BITS (3)
43#define SYS_EVENT_CLS_SHIFT (31-SYS_EVENT_CLS_BITS)
44#define SYS_EVENT_CLS_MASK (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_CLS_SHIFT)
45#define MAKE_SYS_EVENT(cls, id) (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
46/* Macros for extracting codes */
47#define SYS_EVENT_CLS(e) (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_CLS_SHIFT)
48#define SYS_EVENT_ID(e) ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))
49
50#define SYS_TIMEOUT MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
51#define SYS_USB_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
52#define SYS_USB_CONNECTED_ACK MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
53#define SYS_USB_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
54#define SYS_USB_LUN_LOCKED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 4)
55#define SYS_USB_READ_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 5)
56#define SYS_USB_WRITE_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 6)
57#define SYS_POWEROFF MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
58#define SYS_CHARGER_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
59#define SYS_CHARGER_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
60#define SYS_BATTERY_UPDATE MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
61#define SYS_FS_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
62#define SYS_HOTSWAP_INSERTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
63#define SYS_HOTSWAP_EXTRACTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
64#define SYS_PHONE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
65#define SYS_PHONE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
66#define SYS_REMOTE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
67#define SYS_REMOTE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
68#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
69#define SYS_CALL_INCOMING MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 3)
70#define SYS_CALL_HUNG_UP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 4)
71#define SYS_VOLUME_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 5)
72
73#define IS_SYSEVENT(ev) (((ev) & SYS_EVENT) == SYS_EVENT)
74
75#define MAX_NUM_QUEUES 32
76#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
77#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
78
79struct queue_event
80{
81 long id;
82 intptr_t data;
83};
84
85#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
86struct queue_sender_list
87{
88 /* If non-NULL, there is a thread waiting for the corresponding event */
89 /* Must be statically allocated to put in non-cached ram. */
90 struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
91 struct thread_entry *list; /* list of senders in map */
92 /* Send info for last message dequeued or NULL if replied or not sent */
93 struct thread_entry * volatile curr_sender;
94#ifdef HAVE_PRIORITY_SCHEDULING
95 struct blocker blocker;
96#endif
97};
98#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
99
100#ifdef HAVE_PRIORITY_SCHEDULING
101#define QUEUE_GET_THREAD(q) \
102 (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
103#else
104/* Queues without priority enabled have no owner provision _at this time_ */
105#define QUEUE_GET_THREAD(q) \
106 (NULL)
107#endif
108
109struct event_queue
110{
111 struct thread_entry *queue; /* waiter list */
112 struct queue_event events[QUEUE_LENGTH]; /* list of events */
113 unsigned int volatile read; /* head of queue */
114 unsigned int volatile write; /* tail of queue */
115#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
116 struct queue_sender_list * volatile send; /* list of threads waiting for
117 reply to an event */
118#ifdef HAVE_PRIORITY_SCHEDULING
119 struct blocker *blocker_p; /* priority inheritance info
120 for sync message senders */
121#endif
122#endif
123 IF_COP( struct corelock cl; ) /* multiprocessor sync */
124};
125
126extern void queue_init(struct event_queue *q, bool register_queue);
127extern void queue_delete(struct event_queue *q);
128extern void queue_wait(struct event_queue *q, struct queue_event *ev);
129extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
130 int ticks);
131extern void queue_post(struct event_queue *q, long id, intptr_t data);
132#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
133extern void queue_enable_queue_send(struct event_queue *q,
134 struct queue_sender_list *send,
135 unsigned int owner_id);
136extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
137extern void queue_reply(struct event_queue *q, intptr_t retval);
138extern bool queue_in_queue_send(struct event_queue *q);
139#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
140extern bool queue_empty(const struct event_queue* q);
141extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
142
143#define QPEEK_FILTER_COUNT_MASK (0xffu) /* 0x00=1 filter, 0xff=256 filters */
144#define QPEEK_FILTER_HEAD_ONLY (1u << 8) /* Ignored if no filters */
145#define QPEEK_REMOVE_EVENTS (1u << 9) /* Remove or discard events */
146extern bool queue_peek_ex(struct event_queue *q,
147 struct queue_event *ev,
148 unsigned int flags,
149 const long (*filters)[2]);
150
151extern void queue_clear(struct event_queue* q);
152extern void queue_remove_from_head(struct event_queue *q, long id);
153extern int queue_count(const struct event_queue *q);
154extern int queue_broadcast(long id, intptr_t data);
155extern void init_queues(void);
156
157#endif /* QUEUE_H */
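For illustration (hypothetical DEMO_* names, not part of the commit), a private event ID can be built with MAKE_SYS_EVENT and consumed in the usual receive loop:

#include "kernel.h"

#define DEMO_EV_PING  MAKE_SYS_EVENT(SYS_EVENT_CLS_PRIVATE, 1)

static struct event_queue demo_q;

static void demo_loop(void)
{
    struct queue_event ev;

    queue_init(&demo_q, true);               /* register it so queue_broadcast reaches it */

    while (1)
    {
        queue_wait_w_tmo(&demo_q, &ev, HZ);  /* wake at least once per second */

        if (ev.id == DEMO_EV_PING)
        {
            /* ev.data carries the intptr_t given to queue_post()/queue_send() */
        }
        else if (ev.id == SYS_TIMEOUT)
        {
            /* nothing arrived within HZ ticks */
        }
    }
}

/* From another thread: queue_post(&demo_q, DEMO_EV_PING, 42); */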
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h
new file mode 100644
index 0000000000..40e60bb88d
--- /dev/null
+++ b/firmware/kernel/include/semaphore.h
@@ -0,0 +1,40 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef SEMAPHORE_H
23#define SEMAPHORE_H
24
25#include "config.h"
26#include "thread.h"
27
28struct semaphore
29{
30 struct thread_entry *queue; /* Waiter list */
31 int volatile count; /* # of waits remaining before unsignaled */
32 int max; /* maximum # of waits to remain signaled */
33 IF_COP( struct corelock cl; ) /* multiprocessor sync */
34};
35
36extern void semaphore_init(struct semaphore *s, int max, int start);
37extern int semaphore_wait(struct semaphore *s, int timeout);
38extern void semaphore_release(struct semaphore *s);
39
40#endif /* SEMAPHORE_H */
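A sketch of the usual producer/consumer pattern with hypothetical dma_* names; the assumption that semaphore_wait returns the OBJ_WAIT_* codes from kernel.h is the editor's, not stated in this header.

#include "kernel.h"

static struct semaphore dma_done;

void dma_init(void)
{
    semaphore_init(&dma_done, 1, 0);   /* at most 1 pending signal, start unsignaled */
}

void dma_isr(void)                     /* interrupt context: release never blocks */
{
    semaphore_release(&dma_done);
}

int dma_wait(void)
{
    /* Assumed: returns OBJ_WAIT_SUCCEEDED or OBJ_WAIT_TIMEDOUT (see kernel.h). */
    return semaphore_wait(&dma_done, HZ);
}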
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
new file mode 100644
index 0000000000..9cc33b23ae
--- /dev/null
+++ b/firmware/kernel/include/thread.h
@@ -0,0 +1,387 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef THREAD_H
23#define THREAD_H
24
25#include "config.h"
26#include <inttypes.h>
27#include <stddef.h>
28#include <stdbool.h>
29#include "gcc_extensions.h"
30#include "corelock.h"
31
32/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
33 * by giving high priority threads more CPU time than lower priority threads
34 * when they need it. Priority is differential such that the priority
35 * difference between a lower priority runnable thread and the highest priority
36 * runnable thread determines the amount of aging necessary for the lower
37 * priority thread to be scheduled in order to prevent starvation.
38 *
39 * If the software playback codec's PCM buffer is running critically low, the
40 * codec can gradually raise its own priority to override the user interface
41 * and prevent playback skipping.
42 */
43#define PRIORITY_RESERVED_HIGH 0 /* Reserved */
44#define PRIORITY_RESERVED_LOW 32 /* Reserved */
45#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
46#define LOWEST_PRIORITY 31 /* The lowest possible thread priority */
47/* Realtime range reserved for threads that will not allow threads of lower
48 * priority to age and run (future expansion) */
49#define PRIORITY_REALTIME_1 1
50#define PRIORITY_REALTIME_2 2
51#define PRIORITY_REALTIME_3 3
52#define PRIORITY_REALTIME_4 4
53#define PRIORITY_REALTIME 4 /* Lowest realtime range */
54#define PRIORITY_BUFFERING 15 /* Codec buffering thread */
55#define PRIORITY_USER_INTERFACE 16 /* The main thread */
56#define PRIORITY_RECORDING 16 /* Recording thread */
57#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
58#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
59#define PRIORITY_SYSTEM 18 /* All other firmware threads */
60#define PRIORITY_BACKGROUND 20 /* Normal application threads */
61#define NUM_PRIORITIES 32
62#define PRIORITY_IDLE 32 /* Priority representative of no tasks */
63
64#define IO_PRIORITY_IMMEDIATE 0
65#define IO_PRIORITY_BACKGROUND 32
66
67
68#if CONFIG_CODEC == SWCODEC
69# ifdef HAVE_HARDWARE_CLICK
70# define BASETHREADS 17
71# else
72# define BASETHREADS 16
73# endif
74#else
75# define BASETHREADS 11
76#endif /* CONFIG_CODEC == * */
77
78#ifndef TARGET_EXTRA_THREADS
79#define TARGET_EXTRA_THREADS 0
80#endif
81
82#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
83/*
84 * We need more stack when we run under a host,
85 * probably because C library functions are more expensive there.
86 *
87 * The simulator (possibly) doesn't simulate stack usage anyway, but still... */
88
89#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
90struct regs
91{
92 void *t; /* OS thread */
93 void *told; /* Last thread in slot (explained in thread-sdl.c) */
94 void *s; /* Semaphore for blocking and wakeup */
95 void (*start)(void); /* Start function */
96};
97
98#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
99#else
100#include "asm/thread.h"
101#endif /* HAVE_SDL_THREADS */
102
103/* NOTE: The use of the word "queue" may also refer to a linked list of
104 threads being maintained that are normally dealt with in FIFO order
105 and not necessarily a kernel event_queue */
106enum
107{
108 /* States without a timeout must be first */
109 STATE_KILLED = 0, /* Thread is killed (default) */
110 STATE_RUNNING, /* Thread is currently running */
111 STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
112 /* These states involve adding the thread to the tmo list */
113 STATE_SLEEPING, /* Thread is sleeping with a timeout */
114 STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
115 /* Miscellaneous states */
116 STATE_FROZEN, /* Thread is suspended and will not run until
117 thread_thaw is called with its ID */
118 THREAD_NUM_STATES,
119 TIMEOUT_STATE_FIRST = STATE_SLEEPING,
120};
121
122#if NUM_CORES > 1
123/* Pointer value for name field to indicate thread is being killed. Using
124 * an alternate STATE_* won't work since that would interfere with operation
125 * while the thread is still running. */
126#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
127#endif
128
129/* Link information for lists thread is in */
130struct thread_entry; /* forward */
131struct thread_list
132{
133 struct thread_entry *prev; /* Previous thread in a list */
134 struct thread_entry *next; /* Next thread in a list */
135};
136
137#ifdef HAVE_PRIORITY_SCHEDULING
138struct blocker
139{
140 struct thread_entry * volatile thread; /* thread blocking other threads
141 (aka. object owner) */
142 int priority; /* highest priority waiter */
143 struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
144};
145
146/* Choices of wakeup protocol */
147
148/* For transfer of object ownership by one thread to another thread by
149 * the owning thread itself (mutexes) */
150struct thread_entry *
151 wakeup_priority_protocol_transfer(struct thread_entry *thread);
152
153/* For release by owner where ownership doesn't change - other threads,
154 * interrupts, timeouts, etc. (mutex timeout, queues) */
155struct thread_entry *
156 wakeup_priority_protocol_release(struct thread_entry *thread);
157
158
159struct priority_distribution
160{
161 uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
162 uint32_t mask; /* Bitmask of hist entries that are not zero */
163};
164
165#endif /* HAVE_PRIORITY_SCHEDULING */
166
167/* Information kept in each thread slot
168 * members are arranged according to size - largest first - in order
169 * to ensure both alignment and packing at the same time.
170 */
171struct thread_entry
172{
173 struct regs context; /* Register context at switch -
174 _must_ be first member */
175 uintptr_t *stack; /* Pointer to top of stack */
176 const char *name; /* Thread name */
177 long tmo_tick; /* Tick when thread should be woken from
178 timeout -
179 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
180 struct thread_list l; /* Links for blocked/waking/running -
181 circular linkage in both directions */
182 struct thread_list tmo; /* Links for timeout list -
183 Circular in reverse direction, NULL-terminated in
184 forward direction -
185 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
186 struct thread_entry **bqp; /* Pointer to list variable in kernel
187 object where thread is blocked - used
188 for implicit unblock and explicit wake
189 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
190#ifdef HAVE_CORELOCK_OBJECT
191 struct corelock *obj_cl; /* Object corelock where thread is blocked -
192 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
193 struct corelock waiter_cl; /* Corelock for thread_wait */
194 struct corelock slot_cl; /* Corelock to lock thread slot */
195 unsigned char core; /* The core to which thread belongs */
196#endif
197 struct thread_entry *queue; /* List of threads waiting for thread to be
198 removed */
199#ifdef HAVE_WAKEUP_EXT_CB
200 void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
201 performs special steps needed when being
202 forced off of an object's wait queue that
203 go beyond the standard wait queue removal
204 and priority disinheritance */
205 /* Only enabled when using queue_send for now */
206#endif
207#if defined(HAVE_SEMAPHORE_OBJECTS) || \
208 defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
209 NUM_CORES > 1
210 volatile intptr_t retval; /* Return value from a blocked operation/
211 misc. use */
212#endif
213#ifdef HAVE_PRIORITY_SCHEDULING
214 /* Priority summary of owned objects that support inheritance */
215 struct blocker *blocker; /* Pointer to blocker when this thread is blocked
216 on an object that supports PIP -
217 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
218 struct priority_distribution pdist; /* Priority summary of owned objects
219 that have blocked threads and thread's own
220 base priority */
221 int skip_count; /* Number of times skipped if higher priority
222 thread was running */
223 unsigned char base_priority; /* Base priority (set explicitly during
224 creation or thread_set_priority) */
225 unsigned char priority; /* Scheduled priority (higher of base or
226 all threads blocked by this one) */
227#endif
228 uint16_t id; /* Current slot id */
229 unsigned short stack_size; /* Size of stack in bytes */
230 unsigned char state; /* Thread slot state (STATE_*) */
231#ifdef HAVE_SCHEDULER_BOOSTCTRL
232 unsigned char cpu_boost; /* CPU frequency boost flag */
233#endif
234#ifdef HAVE_IO_PRIORITY
235 unsigned char io_priority;
236#endif
237};
238
239/*** Macros for internal use ***/
240/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
241#define THREAD_ID_VERSION_SHIFT 8
242#define THREAD_ID_VERSION_MASK 0xff00
243#define THREAD_ID_SLOT_MASK 0x00ff
244#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
245
246#ifdef HAVE_CORELOCK_OBJECT
247/* Operations to be performed just before stopping a thread and starting
248 a new one if specified before calling switch_thread */
249enum
250{
251 TBOP_CLEAR = 0, /* No operation to do */
252 TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
253 TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
254};
255
256struct thread_blk_ops
257{
258 struct corelock *cl_p; /* pointer to corelock */
259 unsigned char flags; /* TBOP_* flags */
260};
261#endif /* HAVE_CORELOCK_OBJECT */
262
263/* Information kept for each core
264 * Members are arranged for the same reason as in thread_entry
265 */
266struct core_entry
267{
268 /* "Active" lists - core is constantly active on these and are never
269 locked and interrupts do not access them */
270 struct thread_entry *running; /* threads that are running (RTR) */
271 struct thread_entry *timeout; /* threads that are on a timeout before
272 running again */
273 struct thread_entry *block_task; /* Task going off running list */
274#ifdef HAVE_PRIORITY_SCHEDULING
275 struct priority_distribution rtr; /* Summary of running and ready-to-run
276 threads */
277#endif
278 long next_tmo_check; /* soonest time to check tmo threads */
279#ifdef HAVE_CORELOCK_OBJECT
280 struct thread_blk_ops blk_ops; /* operations to perform when
281 blocking a thread */
282 struct corelock rtr_cl; /* Lock for rtr list */
283#endif /* HAVE_CORELOCK_OBJECT */
284};
285
286extern void yield(void);
287extern unsigned sleep(unsigned ticks);
288
289#ifdef HAVE_PRIORITY_SCHEDULING
290#define IF_PRIO(...) __VA_ARGS__
291#define IFN_PRIO(...)
292#else
293#define IF_PRIO(...)
294#define IFN_PRIO(...) __VA_ARGS__
295#endif
296
297void core_idle(void);
298void core_wake(IF_COP_VOID(unsigned int core));
299
300/* Initialize the scheduler */
301void init_threads(void) INIT_ATTR;
302
303/* Allocate a thread in the scheduler */
304#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
305unsigned int create_thread(void (*function)(void),
306 void* stack, size_t stack_size,
307 unsigned flags, const char *name
308 IF_PRIO(, int priority)
309 IF_COP(, unsigned int core));
310
311/* Set and clear the CPU frequency boost flag for the calling thread */
312#ifdef HAVE_SCHEDULER_BOOSTCTRL
313void trigger_cpu_boost(void);
314void cancel_cpu_boost(void);
315#else
316#define trigger_cpu_boost() do { } while(0)
317#define cancel_cpu_boost() do { } while(0)
318#endif
319/* Return thread entry from id */
320struct thread_entry *thread_id_entry(unsigned int thread_id);
321/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
322 * Has no effect on a thread that is not frozen. */
323void thread_thaw(unsigned int thread_id);
324/* Wait for a thread to exit */
325void thread_wait(unsigned int thread_id);
326/* Exit the current thread */
327void thread_exit(void) NORETURN_ATTR;
328#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
329#define ALLOW_REMOVE_THREAD
330/* Remove a thread from the scheduler */
331void remove_thread(unsigned int thread_id);
332#endif
333
334/* Switch to next runnable thread */
335void switch_thread(void);
336/* Blocks a thread for at least the specified number of ticks (0 = wait until
337 * next tick) */
338void sleep_thread(int ticks);
339/* Indefinitely blocks the current thread on a thread queue */
340void block_thread(struct thread_entry *current);
341/* Blocks the current thread on a thread queue until explicitly woken or
342 * the timeout is reached */
343void block_thread_w_tmo(struct thread_entry *current, int timeout);
344
345/* Return bit flags for thread wakeup */
346#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
347#define THREAD_OK 0x1 /* A thread was woken up */
348#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
349 higher priority than current were woken) */
350
351/* A convenience function for waking an entire queue of threads. */
352unsigned int thread_queue_wake(struct thread_entry **list);
353
354/* Wakeup a thread at the head of a list */
355unsigned int wakeup_thread(struct thread_entry **list);
356
357#ifdef HAVE_PRIORITY_SCHEDULING
358int thread_set_priority(unsigned int thread_id, int priority);
359int thread_get_priority(unsigned int thread_id);
360#endif /* HAVE_PRIORITY_SCHEDULING */
361#ifdef HAVE_IO_PRIORITY
362void thread_set_io_priority(unsigned int thread_id, int io_priority);
363int thread_get_io_priority(unsigned int thread_id);
364#endif /* HAVE_IO_PRIORITY */
365#if NUM_CORES > 1
366unsigned int switch_core(unsigned int new_core);
367#endif
368
369/* Return the id of the calling thread. */
370unsigned int thread_self(void);
371
372/* Return the thread_entry for the calling thread.
373 * INTERNAL: Intended for use by kernel and not for programs. */
374struct thread_entry* thread_self_entry(void);
375
376/* Debugging info - only! */
377int thread_stack_usage(const struct thread_entry *thread);
378#if NUM_CORES > 1
379int idle_stack_usage(unsigned int core);
380#endif
381void thread_get_name(char *buffer, int size,
382 struct thread_entry *thread);
383#ifdef RB_PROFILE
384void profile_thread(void);
385#endif
386
387#endif /* THREAD_H */
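Putting the thread API together, as an editorial sketch: the demo_* names are made up, and DEFAULT_STACK_SIZE, CPU and the IF_PRIO()/IF_COP() argument macros are assumed to come from the target/system headers.

#include "kernel.h"

static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void demo_thread(void)
{
    int i;
    for (i = 0; i < 10; i++)
        sleep(HZ);                 /* yield the CPU for roughly one second */
    thread_exit();                 /* a thread function must not simply return */
}

void demo_start(void)
{
    unsigned int id = create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                                    CREATE_THREAD_FROZEN, "demo"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    thread_thaw(id);               /* a frozen thread only starts running here */
}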
diff --git a/firmware/kernel/include/tick.h b/firmware/kernel/include/tick.h
new file mode 100644
index 0000000000..9810f4a1e5
--- /dev/null
+++ b/firmware/kernel/include/tick.h
@@ -0,0 +1,67 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#ifndef TICK_H
22#define TICK_H
23
24#include "config.h"
25#include "system.h" /* for NULL */
26extern void init_tick(void);
27
28#define HZ 100 /* number of ticks per second */
29
30#define MAX_NUM_TICK_TASKS 8
31
32/* global tick variable */
33#if defined(CPU_PP) && defined(BOOTLOADER) && \
34 !defined(HAVE_BOOTLOADER_USB_MODE)
35/* We don't enable interrupts in the PP bootloader unless USB mode is
36 enabled for it, so we need to fake the current_tick variable */
37#define current_tick (signed)(USEC_TIMER/10000)
38
39static inline void call_tick_tasks(void)
40{
41}
42#else
43extern volatile long current_tick;
44
45/* inline helper for implementing target interrupt handler */
46static inline void call_tick_tasks(void)
47{
48 extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
49 void (**p)(void) = tick_funcs;
50 void (*fn)(void);
51
52 current_tick++;
53
54 for(fn = *p; fn != NULL; fn = *(++p))
55 {
56 fn();
57 }
58}
59#endif
60
61/* implemented in target tree */
62extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;
63
64extern int tick_add_task(void (*f)(void));
65extern int tick_remove_task(void (*f)(void));
66
67#endif /* TICK_H */
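An illustrative tick task with hypothetical heartbeat_* names: tick tasks run from the timer interrupt every 1/HZ seconds, so they must be short and must not block.

#include "tick.h"

static void heartbeat_tick(void)
{
    /* Runs HZ times per second in interrupt context. */
    if ((current_tick % HZ) == 0)
    {
        /* once per second: kick a watchdog, sample a sensor, ... */
    }
}

void heartbeat_init(void)
{
    tick_add_task(heartbeat_tick); /* returns an int status; register once only */
}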
diff --git a/firmware/kernel/include/timeout.h b/firmware/kernel/include/timeout.h
new file mode 100644
index 0000000000..0b7c52ba4c
--- /dev/null
+++ b/firmware/kernel/include/timeout.h
@@ -0,0 +1,46 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#ifndef TIMEOUT_H
22#define TIMEOUT_H
23
24#include "config.h"
25
26struct timeout;
27
28/* timeout callback type
29 * tmo - pointer to struct timeout associated with event
30 * return next interval or <= 0 to stop event
31 */
32#define MAX_NUM_TIMEOUTS 8
33typedef int (* timeout_cb_type)(struct timeout *tmo);
34
35struct timeout
36{
37 timeout_cb_type callback; /* callback - returning <= 0 cancels */
38 intptr_t data; /* data passed to callback */
39 long expires; /* expiration tick */
40};
41
42void timeout_register(struct timeout *tmo, timeout_cb_type callback,
43 int ticks, intptr_t data);
44void timeout_cancel(struct timeout *tmo);
45
46#endif /* TIMEOUT_H */
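An illustrative use of the callback contract above, with hypothetical poll_* names; reusing the data field as a countdown is just for the example.

#include "tick.h"      /* HZ */
#include "timeout.h"

static struct timeout poll_tmo;

static int poll_cb(struct timeout *tmo)
{
    /* do one unit of polling work here */

    if (--tmo->data > 0)
        return HZ / 10;            /* run again in 10 ticks (100 ms at HZ == 100) */
    return 0;                      /* <= 0 stops the timeout */
}

void poll_start(void)
{
    timeout_register(&poll_tmo, poll_cb, HZ / 10, 5);  /* 5 invocations in total */
}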
diff --git a/firmware/kernel/kernel-internal.h b/firmware/kernel/kernel-internal.h
new file mode 100644
index 0000000000..51c589ac8f
--- /dev/null
+++ b/firmware/kernel/kernel-internal.h
@@ -0,0 +1,49 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef KERNEL_INTERNAL_H
23#define KERNEL_INTERNAL_H
24
25#include "config.h"
26#include "debug.h"
27
28/* Make this nonzero to enable more elaborate checks on objects */
29#if defined(DEBUG) || defined(SIMULATOR)
30#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
31#else
32#define KERNEL_OBJECT_CHECKS 0
33#endif
34
35#if KERNEL_OBJECT_CHECKS
36#ifdef SIMULATOR
37#include <stdlib.h>
38#define KERNEL_ASSERT(exp, msg...) \
39 ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
40#else
41#define KERNEL_ASSERT(exp, msg...) \
42 ({ if (!({ exp; })) panicf(msg); })
43#endif
44#else
45#define KERNEL_ASSERT(exp, msg...) ({})
46#endif
47
48
49#endif /* KERNEL_INTERNAL_H */
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
new file mode 100644
index 0000000000..f1e4b3c722
--- /dev/null
+++ b/firmware/kernel/mutex.c
@@ -0,0 +1,152 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22
23/****************************************************************************
24 * Simple mutex functions ;)
25 ****************************************************************************/
26
27#include <stdbool.h>
28#include "config.h"
29#include "system.h"
30#include "mutex.h"
31#include "corelock.h"
32#include "thread-internal.h"
33#include "kernel-internal.h"
34
35static inline void __attribute__((always_inline))
36mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
37{
38#ifdef HAVE_PRIORITY_SCHEDULING
39 mtx->blocker.thread = td;
40#else
41 mtx->thread = td;
42#endif
43}
44
45static inline struct thread_entry * __attribute__((always_inline))
46mutex_get_thread(volatile struct mutex *mtx)
47{
48#ifdef HAVE_PRIORITY_SCHEDULING
49 return mtx->blocker.thread;
50#else
51 return mtx->thread;
52#endif
53}
54
55/* Initialize a mutex object - call before any use and do not call again once
56 * the object is available to other threads */
57void mutex_init(struct mutex *m)
58{
59 corelock_init(&m->cl);
60 m->queue = NULL;
61 m->recursion = 0;
62 mutex_set_thread(m, NULL);
63#ifdef HAVE_PRIORITY_SCHEDULING
64 m->blocker.priority = PRIORITY_IDLE;
65 m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
66 m->no_preempt = false;
67#endif
68}
69
70/* Gain ownership of a mutex object or block until it becomes free */
71void mutex_lock(struct mutex *m)
72{
73 struct thread_entry *current = thread_self_entry();
74
75 if(current == mutex_get_thread(m))
76 {
77 /* current thread already owns this mutex */
78 m->recursion++;
79 return;
80 }
81
82 /* lock out other cores */
83 corelock_lock(&m->cl);
84
85 /* must read thread again inside cs (a multiprocessor concern really) */
86 if(LIKELY(mutex_get_thread(m) == NULL))
87 {
88 /* lock is open */
89 mutex_set_thread(m, current);
90 corelock_unlock(&m->cl);
91 return;
92 }
93
94 /* block until the lock is open... */
95 IF_COP( current->obj_cl = &m->cl; )
96 IF_PRIO( current->blocker = &m->blocker; )
97 current->bqp = &m->queue;
98
99 disable_irq();
100 block_thread(current);
101
102 corelock_unlock(&m->cl);
103
104 /* ...and turn control over to next thread */
105 switch_thread();
106}
107
108/* Release ownership of a mutex object - only owning thread must call this */
109void mutex_unlock(struct mutex *m)
110{
111 /* unlocker not being the owner is an unlocking violation */
112 KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
113 "mutex_unlock->wrong thread (%s != %s)\n",
114 mutex_get_thread(m)->name,
115 thread_self_entry()->name);
116
117 if(m->recursion > 0)
118 {
119 /* this thread still owns lock */
120 m->recursion--;
121 return;
122 }
123
124 /* lock out other cores */
125 corelock_lock(&m->cl);
126
127 /* transfer to next queued thread if any */
128 if(LIKELY(m->queue == NULL))
129 {
130 /* no threads waiting - open the lock */
131 mutex_set_thread(m, NULL);
132 corelock_unlock(&m->cl);
133 return;
134 }
135 else
136 {
137 const int oldlevel = disable_irq_save();
138 /* Transfer of the owning thread is handled in the wakeup protocol
139 * if priorities are enabled; otherwise just set it from the
140 * queue head. */
141 IFN_PRIO( mutex_set_thread(m, m->queue); )
142 IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
143 restore_irq(oldlevel);
144
145 corelock_unlock(&m->cl);
146
147#ifdef HAVE_PRIORITY_SCHEDULING
148 if((result & THREAD_SWITCH) && !m->no_preempt)
149 switch_thread();
150#endif
151 }
152}
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
new file mode 100644
index 0000000000..379e3f62c8
--- /dev/null
+++ b/firmware/kernel/queue.c
@@ -0,0 +1,786 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#include <string.h>
23#include "config.h"
24#include "kernel.h"
25#include "system.h"
26#include "queue.h"
27#include "corelock.h"
28#include "kernel-internal.h"
29#include "general.h"
30#include "panic.h"
31
32/* This array holds all queues that are initiated. It is used for broadcast. */
33static struct
34{
35 struct event_queue *queues[MAX_NUM_QUEUES+1];
36#ifdef HAVE_CORELOCK_OBJECT
37 struct corelock cl;
38#endif
39} all_queues SHAREDBSS_ATTR;
40
41/****************************************************************************
42 * Queue handling stuff
43 ****************************************************************************/
44
45#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
46/****************************************************************************
47 * Sender thread queue structure that aids implementation of priority
48 * inheritance on queues because the send list structure is the same as
49 * for all other kernel objects:
50 *
51 * Example state:
52 * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
53 * E3 was posted with queue_post
54 * 4 events remain enqueued (E1-E4)
55 *
56 * rd wr
57 * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
58 * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
59 * \/ \/ \/
60 * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
61 * q->send->curr_sender: /\
62 *
63 * Thread has E0 in its own struct queue_event.
64 *
65 ****************************************************************************/
66
67/* Puts the specified return value in the waiting thread's return value
68 * and wakes the thread.
69 *
70 * A sender should be confirmed to exist before calling, which makes it
71 * more efficient to reject the majority of cases that don't need this
72 * call.
73 */
74static void queue_release_sender(struct thread_entry * volatile * sender,
75 intptr_t retval)
76{
77 struct thread_entry *thread = *sender;
78
79 *sender = NULL; /* Clear slot. */
80#ifdef HAVE_WAKEUP_EXT_CB
81 thread->wakeup_ext_cb = NULL; /* Clear callback. */
82#endif
83 thread->retval = retval; /* Assign thread-local return value. */
84 *thread->bqp = thread; /* Move blocking queue head to thread since
85 wakeup_thread wakes the first thread in
86 the list. */
87 wakeup_thread(thread->bqp);
88}
89
90/* Releases any waiting threads that are queued with queue_send -
91 * reply with 0.
92 */
93static void queue_release_all_senders(struct event_queue *q)
94{
95 if(q->send)
96 {
97 unsigned int i;
98 for(i = q->read; i != q->write; i++)
99 {
100 struct thread_entry **spp =
101 &q->send->senders[i & QUEUE_LENGTH_MASK];
102
103 if(*spp)
104 {
105 queue_release_sender(spp, 0);
106 }
107 }
108 }
109}
110
111/* Callback to do extra forced removal steps from sender list in addition
112 * to the normal blocking queue removal and priority dis-inherit */
113static void queue_remove_sender_thread_cb(struct thread_entry *thread)
114{
115 *((struct thread_entry **)thread->retval) = NULL;
116#ifdef HAVE_WAKEUP_EXT_CB
117 thread->wakeup_ext_cb = NULL;
118#endif
119 thread->retval = 0;
120}
121
122/* Enables queue_send on the specified queue - caller allocates the extra
123 * data structure. Only queues which are taken to be owned by a thread should
124 * enable this. An official owner is not compulsory, but one must be
125 * specified for priority inheritance to operate.
126 *
127 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
128 * messages results in an undefined order of message replies or possible default
129 * replies if two or more waits happen before a reply is done.
130 */
131void queue_enable_queue_send(struct event_queue *q,
132 struct queue_sender_list *send,
133 unsigned int owner_id)
134{
135 int oldlevel = disable_irq_save();
136 corelock_lock(&q->cl);
137
138 if(send != NULL && q->send == NULL)
139 {
140 memset(send, 0, sizeof(*send));
141#ifdef HAVE_PRIORITY_SCHEDULING
142 send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
143 send->blocker.priority = PRIORITY_IDLE;
144 if(owner_id != 0)
145 {
146 send->blocker.thread = thread_id_entry(owner_id);
147 q->blocker_p = &send->blocker;
148 }
149#endif
150 q->send = send;
151 }
152
153 corelock_unlock(&q->cl);
154 restore_irq(oldlevel);
155
156 (void)owner_id;
157}
158
159/* Unblock a blocked thread at a given event index */
160static inline void queue_do_unblock_sender(struct queue_sender_list *send,
161 unsigned int i)
162{
163 if(send)
164 {
165 struct thread_entry **spp = &send->senders[i];
166
167 if(UNLIKELY(*spp))
168 {
169 queue_release_sender(spp, 0);
170 }
171 }
172}
173
174/* Perform the auto-reply sequence */
175static inline void queue_do_auto_reply(struct queue_sender_list *send)
176{
177 if(send && send->curr_sender)
178 {
179 /* auto-reply */
180 queue_release_sender(&send->curr_sender, 0);
181 }
182}
183
184/* Moves the waiting thread's reference from the senders array to
185 * curr_sender, which represents the thread waiting for a response to the
186 * last message removed from the queue. This also protects the thread from
187 * being bumped due to overflow which would not be a valid action since its
188 * message _is_ being processed at this point. */
189static inline void queue_do_fetch_sender(struct queue_sender_list *send,
190 unsigned int rd)
191{
192 if(send)
193 {
194 struct thread_entry **spp = &send->senders[rd];
195
196 if(*spp)
197 {
198 /* Move thread reference from array to the next thread
199 that queue_reply will release */
200 send->curr_sender = *spp;
201 (*spp)->retval = (intptr_t)spp;
202 *spp = NULL;
203 }
204 /* else message was posted asynchronously with queue_post */
205 }
206}
207#else
208/* Empty macros for when synchronous sending is not enabled */
209#define queue_release_all_senders(q)
210#define queue_do_unblock_sender(send, i)
211#define queue_do_auto_reply(send)
212#define queue_do_fetch_sender(send, rd)
213#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
214
215/* Queue must not be available for use during this call */
216void queue_init(struct event_queue *q, bool register_queue)
217{
218 int oldlevel = disable_irq_save();
219
220 if(register_queue)
221 {
222 corelock_lock(&all_queues.cl);
223 }
224
225 corelock_init(&q->cl);
226 q->queue = NULL;
227 /* What garbage is in write is irrelevant because of the masking design -
228 * any other functions that empty the queue do this as well so that
229 * queue_count and queue_empty return sane values in the case of a
230 * concurrent change without locking inside them. */
231 q->read = q->write;
232#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
233 q->send = NULL; /* No message sending by default */
234 IF_PRIO( q->blocker_p = NULL; )
235#endif
236
237 if(register_queue)
238 {
239 void **queues = (void **)all_queues.queues;
240 void **p = find_array_ptr(queues, q);
241
242 if(p - queues >= MAX_NUM_QUEUES)
243 {
244 panicf("queue_init->out of queues");
245 }
246
247 if(*p == NULL)
248 {
249 /* Add it to the all_queues array */
250 *p = q;
251 corelock_unlock(&all_queues.cl);
252 }
253 }
254
255 restore_irq(oldlevel);
256}
257
258/* Queue must not be available for use during this call */
259void queue_delete(struct event_queue *q)
260{
261 int oldlevel = disable_irq_save();
262 corelock_lock(&all_queues.cl);
263 corelock_lock(&q->cl);
264
265 /* Remove the queue if registered */
266 remove_array_ptr((void **)all_queues.queues, q);
267
268 corelock_unlock(&all_queues.cl);
269
270 /* Release thread(s) waiting on queue head */
271 thread_queue_wake(&q->queue);
272
273#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
274 if(q->send)
275 {
276 /* Release threads waiting for replies */
277 queue_release_all_senders(q);
278
279 /* Reply to any dequeued message waiting for one */
280 queue_do_auto_reply(q->send);
281
282 q->send = NULL;
283 IF_PRIO( q->blocker_p = NULL; )
284 }
285#endif
286
287 q->read = q->write;
288
289 corelock_unlock(&q->cl);
290 restore_irq(oldlevel);
291}
292
293/* NOTE: multiple threads waiting on a queue head cannot have a well-
294 defined release order if timeouts are used. If multiple threads must
295 access the queue head, use a dispatcher or queue_wait only. */
296void queue_wait(struct event_queue *q, struct queue_event *ev)
297{
298 int oldlevel;
299 unsigned int rd;
300
301#ifdef HAVE_PRIORITY_SCHEDULING
302 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
303 QUEUE_GET_THREAD(q) == thread_self_entry(),
304 "queue_wait->wrong thread\n");
305#endif
306
307 oldlevel = disable_irq_save();
308 corelock_lock(&q->cl);
309
310#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
311 /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
312 queue_do_auto_reply(q->send);
313#endif
314
315 while(1)
316 {
317 struct thread_entry *current;
318
319 rd = q->read;
320 if (rd != q->write) /* A waking message could disappear */
321 break;
322
323 current = thread_self_entry();
324
325 IF_COP( current->obj_cl = &q->cl; )
326 current->bqp = &q->queue;
327
328 block_thread(current);
329
330 corelock_unlock(&q->cl);
331 switch_thread();
332
333 disable_irq();
334 corelock_lock(&q->cl);
335 }
336
337#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
338 if(ev)
339#endif
340 {
341 q->read = rd + 1;
342 rd &= QUEUE_LENGTH_MASK;
343 *ev = q->events[rd];
344
345 /* Get data for a waiting thread if one */
346 queue_do_fetch_sender(q->send, rd);
347 }
348 /* else just waiting on non-empty */
349
350 corelock_unlock(&q->cl);
351 restore_irq(oldlevel);
352}
353
354void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
355{
356 int oldlevel;
357 unsigned int rd, wr;
358
359 /* this function works only with a positive number (or zero) of ticks */
360 if (ticks == TIMEOUT_BLOCK)
361 {
362 queue_wait(q, ev);
363 return;
364 }
365
366#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
367 KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
368 QUEUE_GET_THREAD(q) == thread_self_entry(),
369 "queue_wait_w_tmo->wrong thread\n");
370#endif
371
372 oldlevel = disable_irq_save();
373 corelock_lock(&q->cl);
374
375#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
376 /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
377 queue_do_auto_reply(q->send);
378#endif
379
380 rd = q->read;
381 wr = q->write;
382 if (rd == wr && ticks > 0)
383 {
384 struct thread_entry *current = thread_self_entry();
385
386 IF_COP( current->obj_cl = &q->cl; )
387 current->bqp = &q->queue;
388
389 block_thread_w_tmo(current, ticks);
390 corelock_unlock(&q->cl);
391
392 switch_thread();
393
394 disable_irq();
395 corelock_lock(&q->cl);
396
397 rd = q->read;
398 wr = q->write;
399 }
400
401#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
402 if(ev)
403#endif
404 {
405 /* no worry about a removed message here - status is checked inside
406 locks - perhaps verify if timeout or false alarm */
407 if (rd != wr)
408 {
409 q->read = rd + 1;
410 rd &= QUEUE_LENGTH_MASK;
411 *ev = q->events[rd];
412 /* Get data for a waiting thread if one */
413 queue_do_fetch_sender(q->send, rd);
414 }
415 else
416 {
417 ev->id = SYS_TIMEOUT;
418 }
419 }
420 /* else just waiting on non-empty */
421
422 corelock_unlock(&q->cl);
423 restore_irq(oldlevel);
424}
425
426void queue_post(struct event_queue *q, long id, intptr_t data)
427{
428 int oldlevel;
429 unsigned int wr;
430
431 oldlevel = disable_irq_save();
432 corelock_lock(&q->cl);
433
434 wr = q->write++ & QUEUE_LENGTH_MASK;
435
436 KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
437 "queue_post ovf q=%08lX", (long)q);
438
439 q->events[wr].id = id;
440 q->events[wr].data = data;
441
442 /* overflow protect - unblock any thread waiting at this index */
443 queue_do_unblock_sender(q->send, wr);
444
445 /* Wakeup a waiting thread if any */
446 wakeup_thread(&q->queue);
447
448 corelock_unlock(&q->cl);
449 restore_irq(oldlevel);
450}
451
452#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
453/* IRQ handlers are not allowed to use this function - we only aim to
454 protect the queue integrity by turning them off. */
455intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
456{
457 int oldlevel;
458 unsigned int wr;
459
460 oldlevel = disable_irq_save();
461 corelock_lock(&q->cl);
462
463 wr = q->write++ & QUEUE_LENGTH_MASK;
464
465 KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
466 "queue_send ovf q=%08lX", (long)q);
467
468 q->events[wr].id = id;
469 q->events[wr].data = data;
470
471 if(LIKELY(q->send))
472 {
473 struct queue_sender_list *send = q->send;
474 struct thread_entry **spp = &send->senders[wr];
475 struct thread_entry *current = thread_self_entry();
476
477 if(UNLIKELY(*spp))
478 {
479 /* overflow protect - unblock any thread waiting at this index */
480 queue_release_sender(spp, 0);
481 }
482
483 /* Wakeup a waiting thread if any */
484 wakeup_thread(&q->queue);
485
486 /* Save thread in slot, add to list and wait for reply */
487 *spp = current;
488 IF_COP( current->obj_cl = &q->cl; )
489 IF_PRIO( current->blocker = q->blocker_p; )
490#ifdef HAVE_WAKEUP_EXT_CB
491 current->wakeup_ext_cb = queue_remove_sender_thread_cb;
492#endif
493 current->retval = (intptr_t)spp;
494 current->bqp = &send->list;
495
496 block_thread(current);
497
498 corelock_unlock(&q->cl);
499 switch_thread();
500
501 return current->retval;
502 }
503
504 /* Functions as queue_post if sending is not enabled */
505 wakeup_thread(&q->queue);
506
507 corelock_unlock(&q->cl);
508 restore_irq(oldlevel);
509
510 return 0;
511}
512
513#if 0 /* not used now but probably will be later */
514/* Query if the last message dequeued was added by queue_send or not */
515bool queue_in_queue_send(struct event_queue *q)
516{
517 bool in_send;
518
519#if NUM_CORES > 1
520 int oldlevel = disable_irq_save();
521 corelock_lock(&q->cl);
522#endif
523
524 in_send = q->send && q->send->curr_sender;
525
526#if NUM_CORES > 1
527 corelock_unlock(&q->cl);
528 restore_irq(oldlevel);
529#endif
530
531 return in_send;
532}
533#endif
534
535/* Replies with retval to the last dequeued message sent with queue_send */
536void queue_reply(struct event_queue *q, intptr_t retval)
537{
538 if(q->send && q->send->curr_sender)
539 {
540 struct queue_sender_list *sender;
541
542 int oldlevel = disable_irq_save();
543 corelock_lock(&q->cl);
544
545 sender = q->send;
546
547 /* Double-check locking */
548 if(LIKELY(sender && sender->curr_sender))
549 queue_release_sender(&sender->curr_sender, retval);
550
551 corelock_unlock(&q->cl);
552 restore_irq(oldlevel);
553 }
554}
555#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
556
557#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
558/* Scan the event queue from head to tail, returning any event from the
559 filter list that was found, optionally removing the event. If an
560 event is returned, synchronous events are handled in the same manner as
561 with queue_wait(_w_tmo); if discarded, then as queue_clear.
562 If filters are NULL, any event matches. If filters exist, the default
563 is to search the full queue depth.
564 Earlier filters take precedence.
565
566 Return true if an event was found, false otherwise. */
567bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
568 unsigned int flags, const long (*filters)[2])
569{
570 bool have_msg;
571 unsigned int rd, wr;
572 int oldlevel;
573
574 if(LIKELY(q->read == q->write))
575 return false; /* Empty: do nothing further */
576
577 have_msg = false;
578
579 oldlevel = disable_irq_save();
580 corelock_lock(&q->cl);
581
582 /* Starting at the head, find first match */
583 for(rd = q->read, wr = q->write; rd != wr; rd++)
584 {
585 struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];
586
587 if(filters)
588 {
589 /* Have filters - find the first thing that passes */
590 const long (* f)[2] = filters;
591 const long (* const f_last)[2] =
592 &filters[flags & QPEEK_FILTER_COUNT_MASK];
593 long id = e->id;
594
595 do
596 {
597 if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
598 goto passed_filter;
599 }
600 while(++f <= f_last);
601
602 if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
603 continue; /* No match; test next event */
604 else
605 break; /* Only check the head */
606 }
607 /* else - anything passes */
608
609 passed_filter:
610
611 /* Found a matching event */
612 have_msg = true;
613
614 if(ev)
615 *ev = *e; /* Caller wants the event */
616
617 if(flags & QPEEK_REMOVE_EVENTS)
618 {
619 /* Do event removal */
620 unsigned int r = q->read;
621 q->read = r + 1; /* Advance head */
622
623 if(ev)
624 {
625 /* Auto-reply */
626 queue_do_auto_reply(q->send);
627 /* Get the thread waiting for reply, if any */
628 queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
629 }
630 else
631 {
632 /* Release any thread waiting on this message */
633 queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
634 }
635
636 /* Slide messages forward into the gap if not at the head */
637 while(rd != r)
638 {
639 unsigned int dst = rd & QUEUE_LENGTH_MASK;
640 unsigned int src = --rd & QUEUE_LENGTH_MASK;
641
642 q->events[dst] = q->events[src];
643 /* Keep sender wait list in sync */
644 if(q->send)
645 q->send->senders[dst] = q->send->senders[src];
646 }
647 }
648
649 break;
650 }
651
652 corelock_unlock(&q->cl);
653 restore_irq(oldlevel);
654
655 return have_msg;
656}
657
658bool queue_peek(struct event_queue *q, struct queue_event *ev)
659{
660 return queue_peek_ex(q, ev, 0, NULL);
661}
662
663void queue_remove_from_head(struct event_queue *q, long id)
664{
665 const long f[2] = { id, id };
666 while (queue_peek_ex(q, NULL,
667 QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
668}
669#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
670/* The more powerful routines aren't required */
671bool queue_peek(struct event_queue *q, struct queue_event *ev)
672{
673 unsigned int rd;
674
675 if(q->read == q->write)
676 return false;
677
678 bool have_msg = false;
679
680 int oldlevel = disable_irq_save();
681 corelock_lock(&q->cl);
682
683 rd = q->read;
684 if(rd != q->write)
685 {
686 *ev = q->events[rd & QUEUE_LENGTH_MASK];
687 have_msg = true;
688 }
689
690 corelock_unlock(&q->cl);
691 restore_irq(oldlevel);
692
693 return have_msg;
694}
695
696void queue_remove_from_head(struct event_queue *q, long id)
697{
698 int oldlevel;
699
700 oldlevel = disable_irq_save();
701 corelock_lock(&q->cl);
702
703 while(q->read != q->write)
704 {
705 unsigned int rd = q->read & QUEUE_LENGTH_MASK;
706
707 if(q->events[rd].id != id)
708 {
709 break;
710 }
711
712 /* Release any thread waiting on this message */
713 queue_do_unblock_sender(q->send, rd);
714
715 q->read++;
716 }
717
718 corelock_unlock(&q->cl);
719 restore_irq(oldlevel);
720}
721#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
722
723/* Poll queue to see if a message exists - be careful using the result if
724 * queue_remove_from_head is called when messages are posted - possibly use
725 * queue_wait_w_tmo(&q, &ev, 0) in that case or else a removed message that
726 * unsignals the queue may cause an unwanted block */
727bool queue_empty(const struct event_queue* q)
728{
729 return ( q->read == q->write );
730}
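As the note above advises, a zero-tick wait is the safer polling pattern when removals can race with posts. A minimal sketch (the queue name my_queue and the handler are hypothetical, not part of this commit):

static struct event_queue my_queue;  /* hypothetical; assume it was initialised elsewhere */

static void my_poll_once(void)
{
    struct queue_event ev;

    /* With ticks == 0 queue_wait_w_tmo() never blocks; an empty queue is
     * reported as SYS_TIMEOUT, so a concurrent queue_remove_from_head()
     * cannot leave us blocked on a queue that looked non-empty a moment ago. */
    queue_wait_w_tmo(&my_queue, &ev, 0);

    if (ev.id != SYS_TIMEOUT)
    {
        /* handle ev.id / ev.data here */
    }
}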
731
732void queue_clear(struct event_queue* q)
733{
734 int oldlevel;
735
736 oldlevel = disable_irq_save();
737 corelock_lock(&q->cl);
738
739 /* Release all threads waiting in the queue for a reply -
740 dequeued sent message will be handled by owning thread */
741 queue_release_all_senders(q);
742
743 q->read = q->write;
744
745 corelock_unlock(&q->cl);
746 restore_irq(oldlevel);
747}
748
749/**
750 * The number of events waiting in the queue.
751 *
752 * @param q pointer to the event_queue
753 * @return number of events in the queue
754 */
755int queue_count(const struct event_queue *q)
756{
757 return q->write - q->read;
758}
759
760int queue_broadcast(long id, intptr_t data)
761{
762 struct event_queue **p = all_queues.queues;
763 struct event_queue *q;
764
765#if NUM_CORES > 1
766 int oldlevel = disable_irq_save();
767 corelock_lock(&all_queues.cl);
768#endif
769
770 for(q = *p; q != NULL; q = *(++p))
771 {
772 queue_post(q, id, data);
773 }
774
775#if NUM_CORES > 1
776 corelock_unlock(&all_queues.cl);
777 restore_irq(oldlevel);
778#endif
779
780 return p - all_queues.queues;
781}
782
783void init_queues(void)
784{
785 corelock_init(&all_queues.cl);
786}
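To make the synchronous path concrete: queue_send() blocks its caller until the receiving thread answers with queue_reply() (or the slot is overwritten). A minimal sketch, assuming HAVE_EXTENDED_MESSAGING_AND_NAME and a queue that was initialised and send-enabled elsewhere; the queue, the event id MY_MSG and the thread bodies are hypothetical and not part of this commit:

#define MY_MSG 1                     /* hypothetical event id */

static struct event_queue q;         /* assume initialised and send-enabled elsewhere */

static void receiver_loop(void)
{
    struct queue_event ev;

    while (1)
    {
        queue_wait(&q, &ev);               /* block until a message arrives */
        if (ev.id == MY_MSG)
            queue_reply(&q, ev.data + 1);  /* wakes the blocked sender */
    }
}

static void sender(void)
{
    /* Blocks until receiver_loop() calls queue_reply(); the reply value
     * becomes queue_send()'s return value. */
    intptr_t answer = queue_send(&q, MY_MSG, 41);
    (void)answer;                    /* 42 in this sketch */
}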
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c
new file mode 100644
index 0000000000..f9ff0ad987
--- /dev/null
+++ b/firmware/kernel/semaphore.c
@@ -0,0 +1,142 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22
23/****************************************************************************
24 * Simple mutex functions ;)
25 ****************************************************************************/
26
27#include <stdbool.h>
28#include "config.h"
29#include "kernel.h"
30#include "semaphore.h"
31#include "kernel-internal.h"
32#include "thread-internal.h"
33
34/****************************************************************************
35 * Simple semaphore functions ;)
36 ****************************************************************************/
37/* Initialize the semaphore object.
38 * max = maximum up count the semaphore may assume (max >= 1)
39 * start = initial count of semaphore (0 <= count <= max) */
40void semaphore_init(struct semaphore *s, int max, int start)
41{
42 KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
43 "semaphore_init->inv arg\n");
44 s->queue = NULL;
45 s->max = max;
46 s->count = start;
47 corelock_init(&s->cl);
48}
49
50/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
51 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
52 * safely be used in an ISR. */
53int semaphore_wait(struct semaphore *s, int timeout)
54{
55 int ret;
56 int oldlevel;
57 int count;
58
59 oldlevel = disable_irq_save();
60 corelock_lock(&s->cl);
61
62 count = s->count;
63
64 if(LIKELY(count > 0))
65 {
66 /* count is not zero; down it */
67 s->count = count - 1;
68 ret = OBJ_WAIT_SUCCEEDED;
69 }
70 else if(timeout == 0)
71 {
72 /* just polling it */
73 ret = OBJ_WAIT_TIMEDOUT;
74 }
75 else
76 {
77 /* count is zero - block until it is upped... */
78 struct thread_entry * current = thread_self_entry();
79 IF_COP( current->obj_cl = &s->cl; )
80 current->bqp = &s->queue;
81 /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
82 * explicit in semaphore_release */
83 current->retval = OBJ_WAIT_TIMEDOUT;
84
85 if(timeout > 0)
86 block_thread_w_tmo(current, timeout); /* ...or timed out... */
87 else
88 block_thread(current); /* -timeout = infinite */
89
90 corelock_unlock(&s->cl);
91
92 /* ...and turn control over to next thread */
93 switch_thread();
94
95 return current->retval;
96 }
97
98 corelock_unlock(&s->cl);
99 restore_irq(oldlevel);
100
101 return ret;
102}
103
104/* Up the semaphore's count and release any thread waiting at the head of the
105 * queue. The count is saturated to the value of the 'max' parameter specified
106 * in 'semaphore_init'. */
107void semaphore_release(struct semaphore *s)
108{
109 unsigned int result = THREAD_NONE;
110 int oldlevel;
111
112 oldlevel = disable_irq_save();
113 corelock_lock(&s->cl);
114
115 if(LIKELY(s->queue != NULL))
116 {
117 /* a thread was queued - wake it up and keep count at 0 */
118 KERNEL_ASSERT(s->count == 0,
119 "semaphore_release->threads queued but count=%d!\n", s->count);
120 s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
121 result = wakeup_thread(&s->queue);
122 }
123 else
124 {
125 int count = s->count;
126 if(count < s->max)
127 {
128 /* nothing waiting - up it */
129 s->count = count + 1;
130 }
131 }
132
133 corelock_unlock(&s->cl);
134 restore_irq(oldlevel);
135
136#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
137 /* No thread switch if not thread context */
138 if((result & THREAD_SWITCH) && is_thread_context())
139 switch_thread();
140#endif
141 (void)result;
142}
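A minimal usage sketch of the API above; the object name, the producer/consumer split and the depth of 8 are hypothetical, and releasing from interrupt context is assumed to be permitted here, as the is_thread_context() guard at the end of semaphore_release() suggests:

static struct semaphore data_ready;

static void setup(void)
{
    semaphore_init(&data_ready, 8, 0);   /* up to 8 pending signals, start empty */
}

static void producer_isr(void)
{
    semaphore_release(&data_ready);      /* never blocks */
}

static void consumer_thread(void)
{
    while (1)
    {
        /* Block for up to one second; OBJ_WAIT_TIMEDOUT means nothing arrived */
        if (semaphore_wait(&data_ready, HZ) == OBJ_WAIT_SUCCEEDED)
        {
            /* consume one unit of work */
        }
    }
}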
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h
new file mode 100644
index 0000000000..c2acdfbaa9
--- /dev/null
+++ b/firmware/kernel/thread-internal.h
@@ -0,0 +1,357 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef THREAD_H
23#define THREAD_H
24
25#include "config.h"
26#include <inttypes.h>
27#include <stddef.h>
28#include <stdbool.h>
29#include "gcc_extensions.h"
30
31/*
32 * We need more stack when we run under a host OS -
33 * maybe because of more expensive C library functions?
34 *
35 * The simulator (possibly) doesn't simulate stack usage anyway, but still ... */
36
37#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
38struct regs
39{
40 void *t; /* OS thread */
41 void *told; /* Last thread in slot (explained in thread-sdl.c) */
42 void *s; /* Semaphore for blocking and wakeup */
43 void (*start)(void); /* Start function */
44};
45
46#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
47#else
48#include "asm/thread.h"
49#endif /* HAVE_SDL_THREADS */
50
51#ifdef CPU_PP
52#ifdef HAVE_CORELOCK_OBJECT
53/* No reliable atomic instruction available - use Peterson's algorithm */
54struct corelock
55{
56 volatile unsigned char myl[NUM_CORES];
57 volatile unsigned char turn;
58} __attribute__((packed));
59
60/* Too big to inline everywhere */
61void corelock_init(struct corelock *cl);
62void corelock_lock(struct corelock *cl);
63int corelock_try_lock(struct corelock *cl);
64void corelock_unlock(struct corelock *cl);
65#endif /* HAVE_CORELOCK_OBJECT */
66#endif /* CPU_PP */
67
68/* NOTE: The use of the word "queue" may also refer to a linked list of
69 threads being maintained that are normally dealt with in FIFO order,
70 and not necessarily a kernel event_queue */
71enum
72{
73 /* States without a timeout must be first */
74 STATE_KILLED = 0, /* Thread is killed (default) */
75 STATE_RUNNING, /* Thread is currently running */
76 STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
77 /* These states involve adding the thread to the tmo list */
78 STATE_SLEEPING, /* Thread is sleeping with a timeout */
79 STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
80 /* Miscellaneous states */
81 STATE_FROZEN, /* Thread is suspended and will not run until
82 thread_thaw is called with its ID */
83 THREAD_NUM_STATES,
84 TIMEOUT_STATE_FIRST = STATE_SLEEPING,
85};
86
87#if NUM_CORES > 1
88/* Pointer value for name field to indicate thread is being killed. Using
89 * an alternate STATE_* won't work since that would interfere with operation
90 * while the thread is still running. */
91#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
92#endif
93
94/* Link information for lists thread is in */
95struct thread_entry; /* forward */
96struct thread_list
97{
98 struct thread_entry *prev; /* Previous thread in a list */
99 struct thread_entry *next; /* Next thread in a list */
100};
101
102#ifndef HAVE_CORELOCK_OBJECT
103/* No atomic corelock op needed or just none defined */
104#define corelock_init(cl)
105#define corelock_lock(cl)
106#define corelock_try_lock(cl)
107#define corelock_unlock(cl)
108#endif /* HAVE_CORELOCK_OBJECT */
109
110#ifdef HAVE_PRIORITY_SCHEDULING
111struct blocker
112{
113 struct thread_entry * volatile thread; /* thread blocking other threads
114 (aka. object owner) */
115 int priority; /* highest priority waiter */
116 struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
117};
118
119/* Choices of wakeup protocol */
120
121/* For transfer of object ownership by one thread to another thread by
122 * the owning thread itself (mutexes) */
123struct thread_entry *
124 wakeup_priority_protocol_transfer(struct thread_entry *thread);
125
126/* For release by owner where ownership doesn't change - other threads,
127 * interrupts, timeouts, etc. (mutex timeout, queues) */
128struct thread_entry *
129 wakeup_priority_protocol_release(struct thread_entry *thread);
130
131
132struct priority_distribution
133{
134 uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
135 uint32_t mask; /* Bitmask of hist entries that are not zero */
136};
137
138#endif /* HAVE_PRIORITY_SCHEDULING */
139
140/* Information kept in each thread slot
141 * members are arranged according to size - largest first - in order
142 * to ensure both alignment and packing at the same time.
143 */
144struct thread_entry
145{
146 struct regs context; /* Register context at switch -
147 _must_ be first member */
148 uintptr_t *stack; /* Pointer to top of stack */
149 const char *name; /* Thread name */
150 long tmo_tick; /* Tick when thread should be woken from
151 timeout -
152 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
153 struct thread_list l; /* Links for blocked/waking/running -
154 circular linkage in both directions */
155 struct thread_list tmo; /* Links for timeout list -
156 Circular in reverse direction, NULL-terminated in
157 forward direction -
158 states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
159 struct thread_entry **bqp; /* Pointer to list variable in kernel
160 object where thread is blocked - used
161 for implicit unblock and explicit wake
162 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
163#ifdef HAVE_CORELOCK_OBJECT
164 struct corelock *obj_cl; /* Object corelock where thread is blocked -
165 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
166 struct corelock waiter_cl; /* Corelock for thread_wait */
167 struct corelock slot_cl; /* Corelock to lock thread slot */
168 unsigned char core; /* The core to which thread belongs */
169#endif
170 struct thread_entry *queue; /* List of threads waiting for thread to be
171 removed */
172#ifdef HAVE_WAKEUP_EXT_CB
173 void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
174 performs special steps needed when being
175 forced off of an object's wait queue that
176 go beyond the standard wait queue removal
177 and priority disinheritance */
178 /* Only enabled when using queue_send for now */
179#endif
180#if defined(HAVE_SEMAPHORE_OBJECTS) || \
181 defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
182 NUM_CORES > 1
183 volatile intptr_t retval; /* Return value from a blocked operation/
184 misc. use */
185#endif
186#ifdef HAVE_PRIORITY_SCHEDULING
187 /* Priority summary of owned objects that support inheritance */
188 struct blocker *blocker; /* Pointer to blocker when this thread is blocked
189 on an object that supports PIP -
190 states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
191 struct priority_distribution pdist; /* Priority summary of owned objects
192 that have blocked threads and thread's own
193 base priority */
194 int skip_count; /* Number of times skipped if higher priority
195 thread was running */
196 unsigned char base_priority; /* Base priority (set explicitly during
197 creation or thread_set_priority) */
198 unsigned char priority; /* Scheduled priority (higher of base or
199 all threads blocked by this one) */
200#endif
201 uint16_t id; /* Current slot id */
202 unsigned short stack_size; /* Size of stack in bytes */
203 unsigned char state; /* Thread slot state (STATE_*) */
204#ifdef HAVE_SCHEDULER_BOOSTCTRL
205 unsigned char cpu_boost; /* CPU frequency boost flag */
206#endif
207#ifdef HAVE_IO_PRIORITY
208 unsigned char io_priority;
209#endif
210};
211
212/*** Macros for internal use ***/
213/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
214#define THREAD_ID_VERSION_SHIFT 8
215#define THREAD_ID_VERSION_MASK 0xff00
216#define THREAD_ID_SLOT_MASK 0x00ff
217#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
218
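For illustration, hypothetical helpers (not part of this commit) showing how the macros above pack a slot index into the low byte and a version - presumably bumped when a slot is recycled - into the high byte:

static inline unsigned int thread_id_slot(unsigned int id)
{
    return id & THREAD_ID_SLOT_MASK;                              /* 0..255 */
}

static inline unsigned int thread_id_version(unsigned int id)
{
    return (id & THREAD_ID_VERSION_MASK) >> THREAD_ID_VERSION_SHIFT;
}

/* THREAD_ID_INIT(5) == 0x0105: version 1, slot 5 */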
219#ifdef HAVE_CORELOCK_OBJECT
220/* Operations to be performed just before stopping a thread and starting
221 a new one if specified before calling switch_thread */
222enum
223{
224 TBOP_CLEAR = 0, /* No operation to do */
225 TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
226 TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
227};
228
229struct thread_blk_ops
230{
231 struct corelock *cl_p; /* pointer to corelock */
232 unsigned char flags; /* TBOP_* flags */
233};
234#endif /* HAVE_CORELOCK_OBJECT */
235
236/* Information kept for each core
237 * Members are arranged for the same reason as in thread_entry
238 */
239struct core_entry
240{
241 /* "Active" lists - core is constantly active on these and are never
242 locked and interrupts do not access them */
243 struct thread_entry *running; /* threads that are running (RTR) */
244 struct thread_entry *timeout; /* threads that are on a timeout before
245 running again */
246 struct thread_entry *block_task; /* Task going off running list */
247#ifdef HAVE_PRIORITY_SCHEDULING
248 struct priority_distribution rtr; /* Summary of running and ready-to-run
249 threads */
250#endif
251 long next_tmo_check; /* soonest time to check tmo threads */
252#ifdef HAVE_CORELOCK_OBJECT
253 struct thread_blk_ops blk_ops; /* operations to perform when
254 blocking a thread */
255 struct corelock rtr_cl; /* Lock for rtr list */
256#endif /* HAVE_CORELOCK_OBJECT */
257};
258
259#ifdef HAVE_PRIORITY_SCHEDULING
260#define IF_PRIO(...) __VA_ARGS__
261#define IFN_PRIO(...)
262#else
263#define IF_PRIO(...)
264#define IFN_PRIO(...) __VA_ARGS__
265#endif
266
267void core_idle(void);
268void core_wake(IF_COP_VOID(unsigned int core));
269
270/* Initialize the scheduler */
271void init_threads(void) INIT_ATTR;
272
273/* Allocate a thread in the scheduler */
274#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
275unsigned int create_thread(void (*function)(void),
276 void* stack, size_t stack_size,
277 unsigned flags, const char *name
278 IF_PRIO(, int priority)
279 IF_COP(, unsigned int core));
280
281/* Set and clear the CPU frequency boost flag for the calling thread */
282#ifdef HAVE_SCHEDULER_BOOSTCTRL
283void trigger_cpu_boost(void);
284void cancel_cpu_boost(void);
285#else
286#define trigger_cpu_boost() do { } while(0)
287#define cancel_cpu_boost() do { } while(0)
288#endif
289/* Return thread entry from id */
290struct thread_entry *thread_id_entry(unsigned int thread_id);
291/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
292 * Has no effect on a thread not frozen. */
293void thread_thaw(unsigned int thread_id);
294/* Wait for a thread to exit */
295void thread_wait(unsigned int thread_id);
296/* Exit the current thread */
297void thread_exit(void) NORETURN_ATTR;
298#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
299#define ALLOW_REMOVE_THREAD
300/* Remove a thread from the scheduler */
301void remove_thread(unsigned int thread_id);
302#endif
303
304/* Switch to next runnable thread */
305void switch_thread(void);
306/* Blocks a thread for at least the specified number of ticks (0 = wait until
307 * next tick) */
308void sleep_thread(int ticks);
309/* Indefinitely blocks the current thread on a thread queue */
310void block_thread(struct thread_entry *current);
311/* Blocks the current thread on a thread queue until explicitly woken or
312 * the timeout is reached */
313void block_thread_w_tmo(struct thread_entry *current, int timeout);
314
315/* Return bit flags for thread wakeup */
316#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
317#define THREAD_OK 0x1 /* A thread was woken up */
318#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
319 higher priority than current were woken) */
320
321/* A convenience function for waking an entire queue of threads. */
322unsigned int thread_queue_wake(struct thread_entry **list);
323
324/* Wakeup a thread at the head of a list */
325unsigned int wakeup_thread(struct thread_entry **list);
326
327#ifdef HAVE_PRIORITY_SCHEDULING
328int thread_set_priority(unsigned int thread_id, int priority);
329int thread_get_priority(unsigned int thread_id);
330#endif /* HAVE_PRIORITY_SCHEDULING */
331#ifdef HAVE_IO_PRIORITY
332void thread_set_io_priority(unsigned int thread_id, int io_priority);
333int thread_get_io_priority(unsigned int thread_id);
334#endif /* HAVE_IO_PRIORITY */
335#if NUM_CORES > 1
336unsigned int switch_core(unsigned int new_core);
337#endif
338
339/* Return the id of the calling thread. */
340unsigned int thread_self(void);
341
342/* Return the thread_entry for the calling thread.
343 * INTERNAL: Intended for use by kernel and not for programs. */
344struct thread_entry* thread_self_entry(void);
345
346/* Debugging info - only! */
347int thread_stack_usage(const struct thread_entry *thread);
348#if NUM_CORES > 1
349int idle_stack_usage(unsigned int core);
350#endif
351void thread_get_name(char *buffer, int size,
352 struct thread_entry *thread);
353#ifdef RB_PROFILE
354void profile_thread(void);
355#endif
356
357#endif /* THREAD_H */
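A minimal sketch of the thread creation API declared above. The stack, names and thread body are hypothetical; PRIORITY_USER_INTERFACE and CPU are assumed to be defined elsewhere in the firmware, and the extra arguments only exist when HAVE_PRIORITY_SCHEDULING resp. NUM_CORES > 1 apply:

static long my_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void my_thread(void)
{
    while (1)
        switch_thread();    /* do some work, then yield to other threads */
}

static void start_my_thread(void)
{
    unsigned int id = create_thread(my_thread, my_stack, sizeof(my_stack),
                                    CREATE_THREAD_FROZEN, "my thread"
                                    IF_PRIO(, PRIORITY_USER_INTERFACE)
                                    IF_COP(, CPU));
    /* ... finish any setup the new thread depends on ... */
    thread_thaw(id);        /* let the frozen thread start running */
}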
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
new file mode 100644
index 0000000000..43ff584a68
--- /dev/null
+++ b/firmware/kernel/thread.c
@@ -0,0 +1,2442 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#include "config.h"
22
23#ifdef HAVE_SIGALTSTACK_THREADS
24/*
25 * The sp check in glibc __longjmp_chk() will cause
26 * a fatal error when switching threads via longjmp().
27 */
28#undef _FORTIFY_SOURCE
29#endif
30
31#include <stdbool.h>
32#include <stdio.h>
33#include "thread.h"
34#include "panic.h"
35#include "system.h"
36#include "kernel.h"
37#include "cpu.h"
38#include "string.h"
39#ifdef RB_PROFILE
40#include <profile.h>
41#endif
42#include "core_alloc.h"
43#include "gcc_extensions.h"
44#include "corelock.h"
45
46/****************************************************************************
47 * ATTENTION!! *
48 * See notes below on implementing processor-specific portions! *
49 ***************************************************************************/
50
51/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
52#ifdef DEBUG
53#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
54#else
55#define THREAD_EXTRA_CHECKS 0
56#endif
57
58/**
59 * General locking order to guarantee progress. Order must be observed but
60 * not all stages are necessarily obligatory. Going from 1) to 3) is
61 * perfectly legal.
62 *
63 * 1) IRQ
64 * This is first because of the likelihood of having an interrupt occur that
65 * also accesses one of the objects farther down the list. Any non-blocking
66 * synchronization done may already have a lock on something during normal
67 * execution and if an interrupt handler running on the same processor as
68 * the one that has the resource locked were to attempt to access the
69 * resource, the interrupt handler would spin forever waiting for an unlock
70 * that will never happen. There is no danger if the interrupt occurs on
71 * a different processor because the one that has the lock will eventually
72 * unlock and the other processor's handler may proceed at that time. Not
73 * necessary when the resource in question is definitely not available to
74 * interrupt handlers.
75 *
76 * 2) Kernel Object
77 * Stage 1) may be needed beforehand if the kernel object allows dual-use such as
78 * event queues. The kernel object must have a scheme to protect itself from
79 * access by another processor and is responsible for serializing the calls
80 * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
81 * other. Objects' queues are also protected here.
82 *
83 * 3) Thread Slot
84 * This locks access to the thread's slot such that its state cannot be
85 * altered by another processor when a state change is in progress such as
86 * when it is in the process of going on a blocked list. An attempt to wake
87 * a thread while it is still blocking will likely desync its state with
88 * the other resources used for that state.
89 *
90 * 4) Core Lists
91 * These lists are specific to a particular processor core and are accessible
92 * by all processor cores and interrupt handlers. The running (rtr) list is
93 * the prime example where a thread may be added by any means.
94 */
95
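The blocking pattern that falls out of this ordering appears throughout the kernel (compare queue_wait_w_tmo() and semaphore_wait()). A condensed, hedged sketch against a hypothetical object, not part of this commit:

/* 'struct my_object' is hypothetical and stands for any kernel object that
 * owns a corelock and a list of blocked threads. */
struct my_object
{
    struct corelock cl;
    struct thread_entry *queue;
};

static void my_object_wait(struct my_object *obj)
{
    int oldlevel = disable_irq_save();      /* 1) IRQ */
    corelock_lock(&obj->cl);                /* 2) kernel object */

    struct thread_entry *current = thread_self_entry();
    IF_COP( current->obj_cl = &obj->cl; )
    current->bqp = &obj->queue;

    /* 3) thread slot and 4) core lists are taken inside block_thread()
     * and the subsequent switch_thread() */
    block_thread(current);

    corelock_unlock(&obj->cl);
    switch_thread();        /* resumes after wakeup_thread(&obj->queue) */
    (void)oldlevel;
}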
96/*---------------------------------------------------------------------------
97 * Processor specific: core_sleep/core_wake/misc. notes
98 *
99 * ARM notes:
100 * FIQ is not dealt with by the scheduler code and is simply restored if it
101 * must by masked for some reason - because threading modifies a register
102 * that FIQ may also modify and there's no way to accomplish it atomically.
103 * s3c2440 is such a case.
104 *
105 * Audio interrupts are generally treated at a higher priority than others
106 * usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
107 * are not in general safe. Special cases may be constructed on a per-
108 * source basis and blocking operations are not available.
109 *
110 * core_sleep procedure to implement for any CPU to ensure an asynchronous
111 * wakeup never results in requiring a wait until the next tick (up to
112 * 10000uS!). May require assembly and careful instruction ordering.
113 *
114 * 1) On multicore, stay awake if directed to do so by another. If so, goto
115 * step 4.
116 * 2) If processor requires, atomically reenable interrupts and perform step
117 * 3.
118 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
119 * on Coldfire) goto step 5.
120 * 4) Enable interrupts.
121 * 5) Exit procedure.
122 *
123 * core_wake and multiprocessor notes for sleep/wake coordination:
124 * If possible, to wake up another processor, the forcing of an interrupt on
125 * the woken core by the waker core is the easiest way to ensure a non-
126 * delayed wake and immediate execution of any woken threads. If that isn't
127 * available then some careful non-blocking synchronization is needed (as on
128 * PP targets at the moment).
129 *---------------------------------------------------------------------------
130 */
131
132/* Cast to the machine pointer size, whose size could be < 4 or > 32
133 * (someday :). */
134#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
135static struct core_entry cores[NUM_CORES] IBSS_ATTR;
136struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
137
138static const char main_thread_name[] = "main";
139#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
140extern uintptr_t stackbegin[];
141extern uintptr_t stackend[];
142#else
143extern uintptr_t *stackbegin;
144extern uintptr_t *stackend;
145#endif
146
147static inline void core_sleep(IF_COP_VOID(unsigned int core))
148 __attribute__((always_inline));
149
150void check_tmo_threads(void)
151 __attribute__((noinline));
152
153static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
154 __attribute__((always_inline));
155
156static void add_to_list_tmo(struct thread_entry *thread)
157 __attribute__((noinline));
158
159static void core_schedule_wakeup(struct thread_entry *thread)
160 __attribute__((noinline));
161
162#if NUM_CORES > 1
163static inline void run_blocking_ops(
164 unsigned int core, struct thread_entry *thread)
165 __attribute__((always_inline));
166#endif
167
168static void thread_stkov(struct thread_entry *thread)
169 __attribute__((noinline));
170
171static inline void store_context(void* addr)
172 __attribute__((always_inline));
173
174static inline void load_context(const void* addr)
175 __attribute__((always_inline));
176
177#if NUM_CORES > 1
178static void thread_final_exit_do(struct thread_entry *current)
179 __attribute__((noinline)) NORETURN_ATTR USED_ATTR;
180#else
181static inline void thread_final_exit(struct thread_entry *current)
182 __attribute__((always_inline)) NORETURN_ATTR;
183#endif
184
185void switch_thread(void)
186 __attribute__((noinline));
187
188/****************************************************************************
189 * Processor/OS-specific section - include necessary core support
190 */
191
192
193#include "asm/thread.c"
194
195#if defined (CPU_PP)
196#include "thread-pp.c"
197#endif /* CPU_PP */
198
199#ifndef IF_NO_SKIP_YIELD
200#define IF_NO_SKIP_YIELD(...)
201#endif
202
203/*
204 * End Processor-specific section
205 ***************************************************************************/
206
207#if THREAD_EXTRA_CHECKS
208static void thread_panicf(const char *msg, struct thread_entry *thread)
209{
210 IF_COP( const unsigned int core = thread->core; )
211 static char name[32];
212 thread_get_name(name, 32, thread);
213 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
214}
215static void thread_stkov(struct thread_entry *thread)
216{
217 thread_panicf("Stkov", thread);
218}
219#define THREAD_PANICF(msg, thread) \
220 thread_panicf(msg, thread)
221#define THREAD_ASSERT(exp, msg, thread) \
222 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
223#else
224static void thread_stkov(struct thread_entry *thread)
225{
226 IF_COP( const unsigned int core = thread->core; )
227 static char name[32];
228 thread_get_name(name, 32, thread);
229 panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
230}
231#define THREAD_PANICF(msg, thread)
232#define THREAD_ASSERT(exp, msg, thread)
233#endif /* THREAD_EXTRA_CHECKS */
234
235/* Thread locking */
236#if NUM_CORES > 1
237#define LOCK_THREAD(thread) \
238 ({ corelock_lock(&(thread)->slot_cl); })
239#define TRY_LOCK_THREAD(thread) \
240 ({ corelock_try_lock(&(thread)->slot_cl); })
241#define UNLOCK_THREAD(thread) \
242 ({ corelock_unlock(&(thread)->slot_cl); })
243#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
244 ({ unsigned int _core = (thread)->core; \
245 cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
246 cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
247#else
248#define LOCK_THREAD(thread) \
249 ({ })
250#define TRY_LOCK_THREAD(thread) \
251 ({ })
252#define UNLOCK_THREAD(thread) \
253 ({ })
254#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
255 ({ })
256#endif
257
258/* RTR list */
259#define RTR_LOCK(core) \
260 ({ corelock_lock(&cores[core].rtr_cl); })
261#define RTR_UNLOCK(core) \
262 ({ corelock_unlock(&cores[core].rtr_cl); })
263
264#ifdef HAVE_PRIORITY_SCHEDULING
265#define rtr_add_entry(core, priority) \
266 prio_add_entry(&cores[core].rtr, (priority))
267
268#define rtr_subtract_entry(core, priority) \
269 prio_subtract_entry(&cores[core].rtr, (priority))
270
271#define rtr_move_entry(core, from, to) \
272 prio_move_entry(&cores[core].rtr, (from), (to))
273#else
274#define rtr_add_entry(core, priority)
275#define rtr_add_entry_inl(core, priority)
276#define rtr_subtract_entry(core, priority)
277#define rtr_subtract_entry_inl(core, priority)
278#define rtr_move_entry(core, from, to)
279#define rtr_move_entry_inl(core, from, to)
280#endif
281
282/*---------------------------------------------------------------------------
283 * Thread list structure - circular:
284 * +------------------------------+
285 * | |
286 * +--+---+<-+---+<-+---+<-+---+<-+
287 * Head->| T | | T | | T | | T |
288 * +->+---+->+---+->+---+->+---+--+
289 * | |
290 * +------------------------------+
291 *---------------------------------------------------------------------------
292 */
293
294/*---------------------------------------------------------------------------
295 * Adds a thread to a list of threads using "insert last". Uses the "l"
296 * links.
297 *---------------------------------------------------------------------------
298 */
299static void add_to_list_l(struct thread_entry **list,
300 struct thread_entry *thread)
301{
302 struct thread_entry *l = *list;
303
304 if (l == NULL)
305 {
306 /* Insert into unoccupied list */
307 thread->l.prev = thread;
308 thread->l.next = thread;
309 *list = thread;
310 return;
311 }
312
313 /* Insert last */
314 thread->l.prev = l->l.prev;
315 thread->l.next = l;
316 l->l.prev->l.next = thread;
317 l->l.prev = thread;
318}
319
320/*---------------------------------------------------------------------------
321 * Removes a thread from a list of threads. Uses the "l" links.
322 *---------------------------------------------------------------------------
323 */
324static void remove_from_list_l(struct thread_entry **list,
325 struct thread_entry *thread)
326{
327 struct thread_entry *prev, *next;
328
329 next = thread->l.next;
330
331 if (thread == next)
332 {
333 /* The only item */
334 *list = NULL;
335 return;
336 }
337
338 if (thread == *list)
339 {
340 /* List becomes next item */
341 *list = next;
342 }
343
344 prev = thread->l.prev;
345
346 /* Fix links to jump over the removed entry. */
347 next->l.prev = prev;
348 prev->l.next = next;
349}
350
351/*---------------------------------------------------------------------------
352 * Timeout list structure - circular reverse (to make "remove item" O(1)),
353 * NULL-terminated forward (to ease the far more common forward traversal):
354 * +------------------------------+
355 * | |
356 * +--+---+<-+---+<-+---+<-+---+<-+
357 * Head->| T | | T | | T | | T |
358 * +---+->+---+->+---+->+---+-X
359 *---------------------------------------------------------------------------
360 */
361
362/*---------------------------------------------------------------------------
363 * Add a thread to the core's timeout list by linking the pointers in its
364 * tmo structure.
365 *---------------------------------------------------------------------------
366 */
367static void add_to_list_tmo(struct thread_entry *thread)
368{
369 struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
370 THREAD_ASSERT(thread->tmo.prev == NULL,
371 "add_to_list_tmo->already listed", thread);
372
373 thread->tmo.next = NULL;
374
375 if (tmo == NULL)
376 {
377 /* Insert into unoccupied list */
378 thread->tmo.prev = thread;
379 cores[IF_COP_CORE(thread->core)].timeout = thread;
380 return;
381 }
382
383 /* Insert Last */
384 thread->tmo.prev = tmo->tmo.prev;
385 tmo->tmo.prev->tmo.next = thread;
386 tmo->tmo.prev = thread;
387}
388
389/*---------------------------------------------------------------------------
390 * Remove a thread from the core's timeout list by unlinking the pointers in
391 * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
392 * is cancelled.
393 *---------------------------------------------------------------------------
394 */
395static void remove_from_list_tmo(struct thread_entry *thread)
396{
397 struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
398 struct thread_entry *prev = thread->tmo.prev;
399 struct thread_entry *next = thread->tmo.next;
400
401 THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
402
403 if (next != NULL)
404 next->tmo.prev = prev;
405
406 if (thread == *list)
407 {
408 /* List becomes next item and empty if next == NULL */
409 *list = next;
410 /* Mark as unlisted */
411 thread->tmo.prev = NULL;
412 }
413 else
414 {
415 if (next == NULL)
416 (*list)->tmo.prev = prev;
417 prev->tmo.next = next;
418 /* Mark as unlisted */
419 thread->tmo.prev = NULL;
420 }
421}
422
423
424#ifdef HAVE_PRIORITY_SCHEDULING
425/*---------------------------------------------------------------------------
426 * Priority distribution structure (one category for each possible priority):
427 *
428 * +----+----+----+ ... +-----+
429 * hist: | F0 | F1 | F2 | | F31 |
430 * +----+----+----+ ... +-----+
431 * mask: | b0 | b1 | b2 | | b31 |
432 * +----+----+----+ ... +-----+
433 *
434 * F = count of threads at priority category n (frequency)
435 * b = bitmask of non-zero priority categories (occupancy)
436 *
437 * / if H[n] != 0 : 1
438 * b[n] = |
439 * \ else : 0
440 *
441 *---------------------------------------------------------------------------
442 * Basic priority inheritance protocol (PIP):
443 *
444 * Mn = mutex n, Tn = thread n
445 *
446 * A lower priority thread inherits the priority of the highest priority
447 * thread blocked waiting for it to complete an action (such as release a
448 * mutex or respond to a message via queue_send):
449 *
450 * 1) T2->M1->T1
451 *
452 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
453 * priority than T1 then T1 inherits the priority of T2.
454 *
455 * 2) T3
456 * \/
457 * T2->M1->T1
458 *
459 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
460 * T1 inherits the higher priority of T2 and T3.
461 *
462 * 3) T3->M2->T2->M1->T1
463 *
464 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
465 * then T1 inherits the priority of T3 through T2.
466 *
467 * Blocking chains can grow arbitrarily complex (though it's best that they
468 * not form at all very often :) and build up from these units.
469 *---------------------------------------------------------------------------
470 */
471
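A worked instance of the bookkeeping above (illustrative numbers only, not part of this commit): a thread at base priority 20 whose owned mutex has one waiter at priority 5 carries hist[20] == 1 and hist[5] == 1, and its scheduled priority is the lowest set bit of the mask, exactly as wakeup_priority_protocol_release() below computes it:

static void pdist_example(void)
{
    struct priority_distribution pd =
    {
        .hist = { [5] = 1, [20] = 1 },
        .mask = (1u << 5) | (1u << 20),
    };

    /* Lowest set bit == numerically lowest == highest priority category */
    int effective_priority = find_first_set_bit(pd.mask);   /* == 5 */
    (void)effective_priority;
}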
472/*---------------------------------------------------------------------------
473 * Increment frequency at category "priority"
474 *---------------------------------------------------------------------------
475 */
476static inline unsigned int prio_add_entry(
477 struct priority_distribution *pd, int priority)
478{
479 unsigned int count;
480 /* Enough size/instruction count difference for ARM makes it worth it to
481 * use different code (192 bytes for ARM). Only thing better is ASM. */
482#ifdef CPU_ARM
483 count = pd->hist[priority];
484 if (++count == 1)
485 pd->mask |= 1 << priority;
486 pd->hist[priority] = count;
487#else /* This one's better for Coldfire */
488 if ((count = ++pd->hist[priority]) == 1)
489 pd->mask |= 1 << priority;
490#endif
491
492 return count;
493}
494
495/*---------------------------------------------------------------------------
496 * Decrement frequency at category "priority"
497 *---------------------------------------------------------------------------
498 */
499static inline unsigned int prio_subtract_entry(
500 struct priority_distribution *pd, int priority)
501{
502 unsigned int count;
503
504#ifdef CPU_ARM
505 count = pd->hist[priority];
506 if (--count == 0)
507 pd->mask &= ~(1 << priority);
508 pd->hist[priority] = count;
509#else
510 if ((count = --pd->hist[priority]) == 0)
511 pd->mask &= ~(1 << priority);
512#endif
513
514 return count;
515}
516
517/*---------------------------------------------------------------------------
518 * Remove from one category and add to another
519 *---------------------------------------------------------------------------
520 */
521static inline void prio_move_entry(
522 struct priority_distribution *pd, int from, int to)
523{
524 uint32_t mask = pd->mask;
525
526#ifdef CPU_ARM
527 unsigned int count;
528
529 count = pd->hist[from];
530 if (--count == 0)
531 mask &= ~(1 << from);
532 pd->hist[from] = count;
533
534 count = pd->hist[to];
535 if (++count == 1)
536 mask |= 1 << to;
537 pd->hist[to] = count;
538#else
539 if (--pd->hist[from] == 0)
540 mask &= ~(1 << from);
541
542 if (++pd->hist[to] == 1)
543 mask |= 1 << to;
544#endif
545
546 pd->mask = mask;
547}
548
549/*---------------------------------------------------------------------------
550 * Change the priority and rtr entry for a running thread
551 *---------------------------------------------------------------------------
552 */
553static inline void set_running_thread_priority(
554 struct thread_entry *thread, int priority)
555{
556 const unsigned int core = IF_COP_CORE(thread->core);
557 RTR_LOCK(core);
558 rtr_move_entry(core, thread->priority, priority);
559 thread->priority = priority;
560 RTR_UNLOCK(core);
561}
562
563/*---------------------------------------------------------------------------
564 * Finds the highest priority thread in a list of threads. If the list is
565 * empty, the PRIORITY_IDLE is returned.
566 *
567 * It is possible to use the struct priority_distribution within an object
568 * instead of scanning the remaining threads in the list but as a compromise,
569 * the resulting per-object memory overhead is saved at a slight speed
570 * penalty under high contention.
571 *---------------------------------------------------------------------------
572 */
573static int find_highest_priority_in_list_l(
574 struct thread_entry * const thread)
575{
576 if (LIKELY(thread != NULL))
577 {
578 /* Go through the list until ending up back at the initial thread */
579 int highest_priority = thread->priority;
580 struct thread_entry *curr = thread;
581
582 do
583 {
584 int priority = curr->priority;
585
586 if (priority < highest_priority)
587 highest_priority = priority;
588
589 curr = curr->l.next;
590 }
591 while (curr != thread);
592
593 return highest_priority;
594 }
595
596 return PRIORITY_IDLE;
597}
598
599/*---------------------------------------------------------------------------
600 * Register priority with blocking system and bubble it down the chain if
601 * any until we reach the end or something is already equal or higher.
602 *
603 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
604 * targets but that same action also guarantees a circular block anyway and
605 * those are prevented, right? :-)
606 *---------------------------------------------------------------------------
607 */
608static struct thread_entry *
609 blocker_inherit_priority(struct thread_entry *current)
610{
611 const int priority = current->priority;
612 struct blocker *bl = current->blocker;
613 struct thread_entry * const tstart = current;
614 struct thread_entry *bl_t = bl->thread;
615
616 /* Blocker cannot change since the object protection is held */
617 LOCK_THREAD(bl_t);
618
619 for (;;)
620 {
621 struct thread_entry *next;
622 int bl_pr = bl->priority;
623
624 if (priority >= bl_pr)
625 break; /* Object priority already high enough */
626
627 bl->priority = priority;
628
629 /* Add this one */
630 prio_add_entry(&bl_t->pdist, priority);
631
632 if (bl_pr < PRIORITY_IDLE)
633 {
634 /* Not first waiter - subtract old one */
635 prio_subtract_entry(&bl_t->pdist, bl_pr);
636 }
637
638 if (priority >= bl_t->priority)
639 break; /* Thread priority high enough */
640
641 if (bl_t->state == STATE_RUNNING)
642 {
643 /* Blocking thread is a running thread therefore there are no
644 * further blockers. Change the "run queue" on which it
645 * resides. */
646 set_running_thread_priority(bl_t, priority);
647 break;
648 }
649
650 bl_t->priority = priority;
651
652 /* If blocking thread has a blocker, apply transitive inheritance */
653 bl = bl_t->blocker;
654
655 if (bl == NULL)
656 break; /* End of chain or object doesn't support inheritance */
657
658 next = bl->thread;
659
660 if (UNLIKELY(next == tstart))
661 break; /* Full-circle - deadlock! */
662
663 UNLOCK_THREAD(current);
664
665#if NUM_CORES > 1
666 for (;;)
667 {
668 LOCK_THREAD(next);
669
670 /* Blocker could change - retest condition */
671 if (LIKELY(bl->thread == next))
672 break;
673
674 UNLOCK_THREAD(next);
675 next = bl->thread;
676 }
677#endif
678 current = bl_t;
679 bl_t = next;
680 }
681
682 UNLOCK_THREAD(bl_t);
683
684 return current;
685}
686
687/*---------------------------------------------------------------------------
688 * Readjust priorities when waking a thread blocked waiting for another
689 * in essence "releasing" the thread's effect on the object owner. Can be
690 * performed from any context.
691 *---------------------------------------------------------------------------
692 */
693struct thread_entry *
694 wakeup_priority_protocol_release(struct thread_entry *thread)
695{
696 const int priority = thread->priority;
697 struct blocker *bl = thread->blocker;
698 struct thread_entry * const tstart = thread;
699 struct thread_entry *bl_t = bl->thread;
700
701 /* Blocker cannot change since object will be locked */
702 LOCK_THREAD(bl_t);
703
704 thread->blocker = NULL; /* Thread not blocked */
705
706 for (;;)
707 {
708 struct thread_entry *next;
709 int bl_pr = bl->priority;
710
711 if (priority > bl_pr)
712 break; /* Object priority higher */
713
714 next = *thread->bqp;
715
716 if (next == NULL)
717 {
718 /* No more threads in queue */
719 prio_subtract_entry(&bl_t->pdist, bl_pr);
720 bl->priority = PRIORITY_IDLE;
721 }
722 else
723 {
724 /* Check list for highest remaining priority */
725 int queue_pr = find_highest_priority_in_list_l(next);
726
727 if (queue_pr == bl_pr)
728 break; /* Object priority not changing */
729
730 /* Change queue priority */
731 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
732 bl->priority = queue_pr;
733 }
734
735 if (bl_pr > bl_t->priority)
736 break; /* thread priority is higher */
737
738 bl_pr = find_first_set_bit(bl_t->pdist.mask);
739
740 if (bl_pr == bl_t->priority)
741 break; /* Thread priority not changing */
742
743 if (bl_t->state == STATE_RUNNING)
744 {
745 /* No further blockers */
746 set_running_thread_priority(bl_t, bl_pr);
747 break;
748 }
749
750 bl_t->priority = bl_pr;
751
752 /* If blocking thread has a blocker, apply transitive inheritance */
753 bl = bl_t->blocker;
754
755 if (bl == NULL)
756 break; /* End of chain or object doesn't support inheritance */
757
758 next = bl->thread;
759
760 if (UNLIKELY(next == tstart))
761 break; /* Full-circle - deadlock! */
762
763 UNLOCK_THREAD(thread);
764
765#if NUM_CORES > 1
766 for (;;)
767 {
768 LOCK_THREAD(next);
769
770 /* Blocker could change - retest condition */
771 if (LIKELY(bl->thread == next))
772 break;
773
774 UNLOCK_THREAD(next);
775 next = bl->thread;
776 }
777#endif
778 thread = bl_t;
779 bl_t = next;
780 }
781
782 UNLOCK_THREAD(bl_t);
783
784#if NUM_CORES > 1
785 if (UNLIKELY(thread != tstart))
786 {
787 /* Relock original if it changed */
788 LOCK_THREAD(tstart);
789 }
790#endif
791
792 return cores[CURRENT_CORE].running;
793}
794
795/*---------------------------------------------------------------------------
796 * Transfer ownership to a thread waiting for an object and transfer
797 * inherited priority boost from other waiters. This algorithm knows that
798 * blocking chains may only unblock from the very end.
799 *
800 * Only the owning thread itself may call this and so the assumption that
801 * it is the running thread is made.
802 *---------------------------------------------------------------------------
803 */
804struct thread_entry *
805 wakeup_priority_protocol_transfer(struct thread_entry *thread)
806{
807 /* Waking thread inherits priority boost from object owner */
808 struct blocker *bl = thread->blocker;
809 struct thread_entry *bl_t = bl->thread;
810 struct thread_entry *next;
811 int bl_pr;
812
813 THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
814 "UPPT->wrong thread", cores[CURRENT_CORE].running);
815
816 LOCK_THREAD(bl_t);
817
818 bl_pr = bl->priority;
819
820 /* Remove the object's boost from the owning thread */
821 if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
822 bl_pr <= bl_t->priority)
823 {
824 /* No more threads at this priority are waiting and the old level is
825 * at least the thread level */
826 int priority = find_first_set_bit(bl_t->pdist.mask);
827
828 if (priority != bl_t->priority)
829 {
830 /* Adjust this thread's priority */
831 set_running_thread_priority(bl_t, priority);
832 }
833 }
834
835 next = *thread->bqp;
836
837 if (LIKELY(next == NULL))
838 {
839 /* Expected shortcut - no more waiters */
840 bl_pr = PRIORITY_IDLE;
841 }
842 else
843 {
844 if (thread->priority <= bl_pr)
845 {
846 /* Need to scan threads remaining in queue */
847 bl_pr = find_highest_priority_in_list_l(next);
848 }
849
850 if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
851 bl_pr < thread->priority)
852 {
853 /* Thread priority must be raised */
854 thread->priority = bl_pr;
855 }
856 }
857
858 bl->thread = thread; /* This thread pwns */
859 bl->priority = bl_pr; /* Save highest blocked priority */
860 thread->blocker = NULL; /* Thread not blocked */
861
862 UNLOCK_THREAD(bl_t);
863
864 return bl_t;
865}
866
867/*---------------------------------------------------------------------------
868 * No threads may be blocked waiting on this thread except those waiting for it to exit.
869 * The alternative is more elaborate cleanup and object registration code.
870 * Check this for risk of silent data corruption when objects with
871 * inheritable blocking are abandoned by the owner - not precise but may
872 * catch something.
873 *---------------------------------------------------------------------------
874 */
875static void __attribute__((noinline)) check_for_obj_waiters(
876 const char *function, struct thread_entry *thread)
877{
878 /* Only one bit in the mask should be set, with a frequency of 1, which
879 * represents the thread's own base priority */
880 uint32_t mask = thread->pdist.mask;
881 if ((mask & (mask - 1)) != 0 ||
882 thread->pdist.hist[find_first_set_bit(mask)] > 1)
883 {
884 char name[32];
885 thread_get_name(name, 32, thread);
886 panicf("%s->%s with obj. waiters", function, name);
887 }
888}
889#endif /* HAVE_PRIORITY_SCHEDULING */
890
891/*---------------------------------------------------------------------------
892 * Move a thread back to a running state on its core.
893 *---------------------------------------------------------------------------
894 */
895static void core_schedule_wakeup(struct thread_entry *thread)
896{
897 const unsigned int core = IF_COP_CORE(thread->core);
898
899 RTR_LOCK(core);
900
901 thread->state = STATE_RUNNING;
902
903 add_to_list_l(&cores[core].running, thread);
904 rtr_add_entry(core, thread->priority);
905
906 RTR_UNLOCK(core);
907
908#if NUM_CORES > 1
909 if (core != CURRENT_CORE)
910 core_wake(core);
911#endif
912}
913
914/*---------------------------------------------------------------------------
915 * Check the core's timeout list when at least one thread is due to wake.
916 * Filtering for the condition is done before making the call. Resets the
917 * tick when the next check will occur.
918 *---------------------------------------------------------------------------
919 */
920void check_tmo_threads(void)
921{
922 const unsigned int core = CURRENT_CORE;
923 const long tick = current_tick; /* snapshot the current tick */
924 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
925 struct thread_entry *next = cores[core].timeout;
926
927 /* If there are no processes waiting for a timeout, just keep the check
928 tick from falling into the past. */
929
930 /* Break the loop once we have walked through the list of all
931 * sleeping processes or have removed them all. */
932 while (next != NULL)
933 {
934 /* Check sleeping threads. Allow interrupts between checks. */
935 enable_irq();
936
937 struct thread_entry *curr = next;
938
939 next = curr->tmo.next;
940
941 /* Lock thread slot against explicit wakeup */
942 disable_irq();
943 LOCK_THREAD(curr);
944
945 unsigned state = curr->state;
946
947 if (state < TIMEOUT_STATE_FIRST)
948 {
949 /* Cleanup threads no longer on a timeout but still on the
950 * list. */
951 remove_from_list_tmo(curr);
952 }
953 else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
954 {
955 /* Timeout still pending - this will be the usual case */
956 if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
957 {
958 /* Earliest timeout found so far - move the next check up
959 to its time */
960 next_tmo_check = curr->tmo_tick;
961 }
962 }
963 else
964 {
965 /* Sleep timeout has been reached so bring the thread back to
966 * life again. */
967 if (state == STATE_BLOCKED_W_TMO)
968 {
969#ifdef HAVE_CORELOCK_OBJECT
970 /* Lock the waiting thread's kernel object */
971 struct corelock *ocl = curr->obj_cl;
972
973 if (UNLIKELY(corelock_try_lock(ocl) == 0))
974 {
975 /* Need to retry in the correct order though the need is
976 * unlikely */
977 UNLOCK_THREAD(curr);
978 corelock_lock(ocl);
979 LOCK_THREAD(curr);
980
981 if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
982 {
983 /* Thread was woken or removed explicitly while slot
984 * was unlocked */
985 corelock_unlock(ocl);
986 remove_from_list_tmo(curr);
987 UNLOCK_THREAD(curr);
988 continue;
989 }
990 }
991#endif /* HAVE_CORELOCK_OBJECT */
992
993 remove_from_list_l(curr->bqp, curr);
994
995#ifdef HAVE_WAKEUP_EXT_CB
996 if (curr->wakeup_ext_cb != NULL)
997 curr->wakeup_ext_cb(curr);
998#endif
999
1000#ifdef HAVE_PRIORITY_SCHEDULING
1001 if (curr->blocker != NULL)
1002 wakeup_priority_protocol_release(curr);
1003#endif
1004 corelock_unlock(ocl);
1005 }
1006 /* else state == STATE_SLEEPING */
1007
1008 remove_from_list_tmo(curr);
1009
1010 RTR_LOCK(core);
1011
1012 curr->state = STATE_RUNNING;
1013
1014 add_to_list_l(&cores[core].running, curr);
1015 rtr_add_entry(core, curr->priority);
1016
1017 RTR_UNLOCK(core);
1018 }
1019
1020 UNLOCK_THREAD(curr);
1021 }
1022
1023 cores[core].next_tmo_check = next_tmo_check;
1024}
1025
1026/*---------------------------------------------------------------------------
1027 * Performs operations that must be done before blocking a thread but after
1028 * the state is saved.
1029 *---------------------------------------------------------------------------
1030 */
1031#if NUM_CORES > 1
1032static inline void run_blocking_ops(
1033 unsigned int core, struct thread_entry *thread)
1034{
1035 struct thread_blk_ops *ops = &cores[core].blk_ops;
1036 const unsigned flags = ops->flags;
1037
1038 if (LIKELY(flags == TBOP_CLEAR))
1039 return;
1040
1041 switch (flags)
1042 {
1043 case TBOP_SWITCH_CORE:
1044 core_switch_blk_op(core, thread);
1045 /* Fall-through */
1046 case TBOP_UNLOCK_CORELOCK:
1047 corelock_unlock(ops->cl_p);
1048 break;
1049 }
1050
1051 ops->flags = TBOP_CLEAR;
1052}
1053#endif /* NUM_CORES > 1 */
1054
1055#ifdef RB_PROFILE
1056void profile_thread(void)
1057{
1058 profstart(cores[CURRENT_CORE].running - threads);
1059}
1060#endif
1061
1062/*---------------------------------------------------------------------------
1063 * Prepares a thread to block on an object's list and/or for a specified
1064 * duration - expects object and slot to be appropriately locked if needed
1065 * and interrupts to be masked.
1066 *---------------------------------------------------------------------------
1067 */
1068static inline void block_thread_on_l(struct thread_entry *thread,
1069 unsigned state)
1070{
1071 /* If inlined, unreachable branches will be pruned with no size penalty
1072 because state is passed as a constant parameter. */
1073 const unsigned int core = IF_COP_CORE(thread->core);
1074
1075 /* Remove the thread from the list of running threads. */
1076 RTR_LOCK(core);
1077 remove_from_list_l(&cores[core].running, thread);
1078 rtr_subtract_entry(core, thread->priority);
1079 RTR_UNLOCK(core);
1080
1081 /* Add a timeout to the block if not infinite */
1082 switch (state)
1083 {
1084 case STATE_BLOCKED:
1085 case STATE_BLOCKED_W_TMO:
1086 /* Put the thread into a new list of inactive threads. */
1087 add_to_list_l(thread->bqp, thread);
1088
1089 if (state == STATE_BLOCKED)
1090 break;
1091
1092 /* Fall-through */
1093 case STATE_SLEEPING:
1094 /* If this thread times out sooner than any other thread, update
1095 next_tmo_check to its timeout */
1096 if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
1097 {
1098 cores[core].next_tmo_check = thread->tmo_tick;
1099 }
1100
1101 if (thread->tmo.prev == NULL)
1102 {
1103 add_to_list_tmo(thread);
1104 }
1105 /* else thread was never removed from list - just keep it there */
1106 break;
1107 }
1108
1109 /* Remember the next thread about to block. */
1110 cores[core].block_task = thread;
1111
1112 /* Report new state. */
1113 thread->state = state;
1114}
1115
1116/*---------------------------------------------------------------------------
1117 * Switch thread in round robin fashion for any given priority. Any thread
1118 * that removed itself from the running list first must have recorded itself
1119 * in cores[core].block_task beforehand.
1120 *
1121 * INTERNAL: Intended for use by kernel and not for programs.
1122 *---------------------------------------------------------------------------
1123 */
1124void switch_thread(void)
1125{
1126
1127 const unsigned int core = CURRENT_CORE;
1128 struct thread_entry *block = cores[core].block_task;
1129 struct thread_entry *thread = cores[core].running;
1130
1131 /* Get context to save - next thread to run is unknown until all wakeups
1132 * are evaluated */
1133 if (block != NULL)
1134 {
1135 cores[core].block_task = NULL;
1136
1137#if NUM_CORES > 1
1138 if (UNLIKELY(thread == block))
1139 {
1140 /* This was the last thread running and another core woke us before
1141 * reaching here. Force next thread selection to give tmo threads or
1142 * other threads woken before this block a first chance. */
1143 block = NULL;
1144 }
1145 else
1146#endif
1147 {
1148 /* Blocking task is the old one */
1149 thread = block;
1150 }
1151 }
1152
1153#ifdef RB_PROFILE
1154#ifdef CPU_COLDFIRE
1155 _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1156#else
1157 profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
1158#endif
1159#endif
1160
1161 /* Begin task switching by saving our current context so that we can
1162 * restore the state of the current thread later to the point prior
1163 * to this call. */
1164 store_context(&thread->context);
1165
1166#ifdef DEBUG
1167 /* Check core_ctx buflib integrity */
1168 core_check_valid();
1169#endif
1170
1171 /* Check if the current thread's stack has overflowed */
1172 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1173 thread_stkov(thread);
1174
1175#if NUM_CORES > 1
1176 /* Run any blocking operations requested before switching/sleeping */
1177 run_blocking_ops(core, thread);
1178#endif
1179
1180#ifdef HAVE_PRIORITY_SCHEDULING
1181 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1182 /* Reset the value of thread's skip count */
1183 thread->skip_count = 0;
1184#endif
1185
1186 for (;;)
1187 {
1188 /* If there are threads on a timeout and the earliest wakeup is due,
1189 * check the list and wake any threads that need to start running
1190 * again. */
1191 if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
1192 {
1193 check_tmo_threads();
1194 }
1195
1196 disable_irq();
1197 RTR_LOCK(core);
1198
1199 thread = cores[core].running;
1200
1201 if (UNLIKELY(thread == NULL))
1202 {
1203 /* Enter sleep mode to reduce power usage - woken up on interrupt
1204 * or wakeup request from another core - expected to enable
1205 * interrupts. */
1206 RTR_UNLOCK(core);
1207 core_sleep(IF_COP(core));
1208 }
1209 else
1210 {
1211#ifdef HAVE_PRIORITY_SCHEDULING
1212 /* Select the new task based on priorities and the last time a
1213 * process got CPU time relative to the highest priority runnable
1214 * task. */
1215 struct priority_distribution *pd = &cores[core].rtr;
1216 int max = find_first_set_bit(pd->mask);
1217
1218 if (block == NULL)
1219 {
1220 /* Not switching on a block, tentatively select next thread */
1221 thread = thread->l.next;
1222 }
1223
1224 for (;;)
1225 {
1226 int priority = thread->priority;
1227 int diff;
1228
1229 /* This ridiculously simple method of aging seems to work
1230 * suspiciously well. It does tend to reward CPU hogs (threads that
1231 * under-yield), which is generally not desirable. On
1232 * the plus side, relative to other threads it penalizes
1233 * excess yielding, which is good if some high priority thread
1234 * is performing no useful work such as polling for a device
1235 * to be ready. Of course, aging is only employed when higher
1236 * and lower priority threads are runnable. The highest
1237 * priority runnable thread(s) are never skipped unless a
1238 * lower-priority process has aged sufficiently. Priorities
1239 * of REALTIME class are run strictly according to priority
1240 * thus are not subject to switchout due to lower-priority
1241 * processes aging; they must give up the processor by going
1242 * off the run list. */
1243 if (LIKELY(priority <= max) ||
1244 IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
1245 (priority > PRIORITY_REALTIME &&
1246 (diff = priority - max,
1247 ++thread->skip_count > diff*diff)))
1248 {
1249 cores[core].running = thread;
1250 break;
1251 }
1252
1253 thread = thread->l.next;
1254 }
1255#else
1256 /* Without priority use a simple FCFS algorithm */
1257 if (block == NULL)
1258 {
1259 /* Not switching on a block, select next thread */
1260 thread = thread->l.next;
1261 cores[core].running = thread;
1262 }
1263#endif /* HAVE_PRIORITY_SCHEDULING */
1264
1265 RTR_UNLOCK(core);
1266 enable_irq();
1267 break;
1268 }
1269 }
1270
1271 /* And finally give control to the next thread. */
1272 load_context(&thread->context);
1273
1274#ifdef RB_PROFILE
1275 profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
1276#endif
1277
1278}
1279
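The test at the heart of this selection loop packs the whole aging rule into one expression. Restated as a standalone helper it reads as below; this is a sketch, not part of the patch, the helper name is invented, and the IF_NO_SKIP_YIELD skip_count == -1 escape hatch is left out.

    #include <stdbool.h>

    /* Same decision as the condition in the loop above. 'max' is
     * find_first_set_bit() of the core's runnable-priority mask, i.e. the
     * numerically lowest and therefore highest runnable priority.
     * Requires HAVE_PRIORITY_SCHEDULING for PRIORITY_REALTIME. */
    static bool aging_selects(int priority, int max, int *skip_count)
    {
        if (priority <= max)
            return true;                  /* already the best runnable priority */

        if (priority <= PRIORITY_REALTIME)
            return false;                 /* realtime class is never aged past */

        int diff = priority - max;
        return ++*skip_count > diff*diff; /* quadratic penalty for lower priority */
    }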
1280/*---------------------------------------------------------------------------
1281 * Sleeps a thread for at least a specified number of ticks with zero being
1282 * a wait until the next tick.
1283 *
1284 * INTERNAL: Intended for use by kernel and not for programs.
1285 *---------------------------------------------------------------------------
1286 */
1287void sleep_thread(int ticks)
1288{
1289 struct thread_entry *current = cores[CURRENT_CORE].running;
1290
1291 LOCK_THREAD(current);
1292
1293 /* Set our timeout, remove from run list and join timeout list. */
1294 current->tmo_tick = current_tick + ticks + 1;
1295 block_thread_on_l(current, STATE_SLEEPING);
1296
1297 UNLOCK_THREAD(current);
1298}
1299
1300/*---------------------------------------------------------------------------
1301 * Indefinitely block a thread on a blocking queue for explicit wakeup.
1302 *
1303 * INTERNAL: Intended for use by kernel objects and not for programs.
1304 *---------------------------------------------------------------------------
1305 */
1306void block_thread(struct thread_entry *current)
1307{
1308 /* Set the state to blocked and take us off of the run queue until we
1309 * are explicitly woken */
1310 LOCK_THREAD(current);
1311
1312 /* Set the list for explicit wakeup */
1313 block_thread_on_l(current, STATE_BLOCKED);
1314
1315#ifdef HAVE_PRIORITY_SCHEDULING
1316 if (current->blocker != NULL)
1317 {
1318 /* Object supports PIP */
1319 current = blocker_inherit_priority(current);
1320 }
1321#endif
1322
1323 UNLOCK_THREAD(current);
1324}
1325
1326/*---------------------------------------------------------------------------
1327 * Block a thread on a blocking queue for a specified time interval or until
1328 * explicitly woken - whichever happens first.
1329 *
1330 * INTERNAL: Intended for use by kernel objects and not for programs.
1331 *---------------------------------------------------------------------------
1332 */
1333void block_thread_w_tmo(struct thread_entry *current, int timeout)
1334{
1335 /* Get the entry for the current running thread. */
1336 LOCK_THREAD(current);
1337
1338 /* Set the state to blocked with the specified timeout */
1339 current->tmo_tick = current_tick + timeout;
1340
1341 /* Set the list for explicit wakeup */
1342 block_thread_on_l(current, STATE_BLOCKED_W_TMO);
1343
1344#ifdef HAVE_PRIORITY_SCHEDULING
1345 if (current->blocker != NULL)
1346 {
1347 /* Object supports PIP */
1348 current = blocker_inherit_priority(current);
1349 }
1350#endif
1351
1352 UNLOCK_THREAD(current);
1353}
1354
1355/*---------------------------------------------------------------------------
1356 * Explicitly wake a thread on a blocking queue. Only affects threads in
1357 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
1358 *
1359 * This code should be considered a critical section by the caller meaning
1360 * that the object's corelock should be held.
1361 *
1362 * INTERNAL: Intended for use by kernel objects and not for programs.
1363 *---------------------------------------------------------------------------
1364 */
1365unsigned int wakeup_thread(struct thread_entry **list)
1366{
1367 struct thread_entry *thread = *list;
1368 unsigned int result = THREAD_NONE;
1369
1370 /* Check if there is a blocked thread at all. */
1371 if (thread == NULL)
1372 return result;
1373
1374 LOCK_THREAD(thread);
1375
1376 /* Determine thread's current state. */
1377 switch (thread->state)
1378 {
1379 case STATE_BLOCKED:
1380 case STATE_BLOCKED_W_TMO:
1381 remove_from_list_l(list, thread);
1382
1383 result = THREAD_OK;
1384
1385#ifdef HAVE_PRIORITY_SCHEDULING
1386 struct thread_entry *current;
1387 struct blocker *bl = thread->blocker;
1388
1389 if (bl == NULL)
1390 {
1391 /* No inheritance - just boost the thread by aging */
1392 IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
1393 thread->skip_count = thread->priority;
1394 current = cores[CURRENT_CORE].running;
1395 }
1396 else
1397 {
1398 /* Call the specified unblocking PIP */
1399 current = bl->wakeup_protocol(thread);
1400 }
1401
1402 if (current != NULL &&
1403 find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
1404 < current->priority)
1405 {
1406 /* There is a thread ready to run of higher or same priority on
1407 * the same core as the current one; recommend a task switch.
1408 * Knowing if this is an interrupt call would be helpful here. */
1409 result |= THREAD_SWITCH;
1410 }
1411#endif /* HAVE_PRIORITY_SCHEDULING */
1412
1413 core_schedule_wakeup(thread);
1414 break;
1415
1416 /* Nothing to do. State is not blocked. */
1417#if THREAD_EXTRA_CHECKS
1418 default:
1419 THREAD_PANICF("wakeup_thread->block invalid", thread);
1420 case STATE_RUNNING:
1421 case STATE_KILLED:
1422 break;
1423#endif
1424 }
1425
1426 UNLOCK_THREAD(thread);
1427 return result;
1428}
1429
1430/*---------------------------------------------------------------------------
1431 * Wake an entire queue of threads - returns the bitwise-or of the return bitmask
1432 * from each operation, or THREAD_NONE if nothing was awakened. Object owning
1433 * the queue must be locked first.
1434 *
1435 * INTERNAL: Intended for use by kernel objects and not for programs.
1436 *---------------------------------------------------------------------------
1437 */
1438unsigned int thread_queue_wake(struct thread_entry **list)
1439{
1440 unsigned result = THREAD_NONE;
1441
1442 for (;;)
1443 {
1444 unsigned int rc = wakeup_thread(list);
1445
1446 if (rc == THREAD_NONE)
1447 break; /* No more threads */
1448
1449 result |= rc;
1450 }
1451
1452 return result;
1453}
1454
1455/*---------------------------------------------------------------------------
1456 * Assign the thread slot a new ID. Version is 1-255.
1457 *---------------------------------------------------------------------------
1458 */
1459static void new_thread_id(unsigned int slot_num,
1460 struct thread_entry *thread)
1461{
1462 unsigned int version =
1463 (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
1464 & THREAD_ID_VERSION_MASK;
1465
1466 /* If wrapped to 0, make it 1 */
1467 if (version == 0)
1468 version = 1u << THREAD_ID_VERSION_SHIFT;
1469
1470 thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
1471}
1472
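Because thread_id_entry() below only decodes the slot bits, code that caches an ID is expected to compare it back against the slot, the way thread_wait() and thread_set_priority() do. A sketch of that check follows; the helper name is invented and, like thread_get_priority(), it peeks at the slot without locking it.

    #include <stdbool.h>

    /* True while the ID still refers to a live thread in that slot; once the
     * slot is recycled, new_thread_id() has bumped the version bits and the
     * comparison fails. */
    static bool thread_id_valid(unsigned int thread_id)
    {
        struct thread_entry *t = thread_id_entry(thread_id); /* slot bits only */
        return t->id == thread_id && t->state != STATE_KILLED;
    }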
1473/*---------------------------------------------------------------------------
1474 * Find an empty thread slot, or return NULL if none is found. The slot returned
1475 * will be locked on multicore.
1476 *---------------------------------------------------------------------------
1477 */
1478static struct thread_entry * find_empty_thread_slot(void)
1479{
1480 /* Any slot could be on an interrupt-accessible list */
1481 IF_COP( int oldlevel = disable_irq_save(); )
1482 struct thread_entry *thread = NULL;
1483 int n;
1484
1485 for (n = 0; n < MAXTHREADS; n++)
1486 {
1487 /* Obtain current slot state - lock it on multicore */
1488 struct thread_entry *t = &threads[n];
1489 LOCK_THREAD(t);
1490
1491 if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
1492 {
1493 /* Slot is empty - leave it locked and caller will unlock */
1494 thread = t;
1495 break;
1496 }
1497
1498 /* Finished examining slot - no longer busy - unlock on multicore */
1499 UNLOCK_THREAD(t);
1500 }
1501
1502 IF_COP( restore_irq(oldlevel); ) /* Re-enable interrupts - this slot is
1503 not accessible to them yet */
1504 return thread;
1505}
1506
1507/*---------------------------------------------------------------------------
1508 * Return the thread_entry pointer for a thread_id. Return the current
1509 * thread if the ID is (unsigned int)-1 (alias for current).
1510 *---------------------------------------------------------------------------
1511 */
1512struct thread_entry * thread_id_entry(unsigned int thread_id)
1513{
1514 return &threads[thread_id & THREAD_ID_SLOT_MASK];
1515}
1516
1517/*---------------------------------------------------------------------------
1518 * Return the thread id of the calling thread
1519 * --------------------------------------------------------------------------
1520 */
1521unsigned int thread_self(void)
1522{
1523 return cores[CURRENT_CORE].running->id;
1524}
1525
1526/*---------------------------------------------------------------------------
1527 * Return the thread entry of the calling thread.
1528 *
1529 * INTERNAL: Intended for use by kernel and not for programs.
1530 *---------------------------------------------------------------------------
1531 */
1532struct thread_entry* thread_self_entry(void)
1533{
1534 return cores[CURRENT_CORE].running;
1535}
1536
1537/*---------------------------------------------------------------------------
1538 * Place the current core in idle mode - woken up on interrupt or wake
1539 * request from another core.
1540 *---------------------------------------------------------------------------
1541 */
1542void core_idle(void)
1543{
1544 IF_COP( const unsigned int core = CURRENT_CORE; )
1545 disable_irq();
1546 core_sleep(IF_COP(core));
1547}
1548
1549/*---------------------------------------------------------------------------
1550 * Create a thread. If using a dual core architecture, specify which core to
1551 * start the thread on.
1552 *
1553 * Return the new thread's ID if a slot could be allocated, else 0.
1554 *---------------------------------------------------------------------------
1555 */
1556unsigned int create_thread(void (*function)(void),
1557 void* stack, size_t stack_size,
1558 unsigned flags, const char *name
1559 IF_PRIO(, int priority)
1560 IF_COP(, unsigned int core))
1561{
1562 unsigned int i;
1563 unsigned int stack_words;
1564 uintptr_t stackptr, stackend;
1565 struct thread_entry *thread;
1566 unsigned state;
1567 int oldlevel;
1568
1569 thread = find_empty_thread_slot();
1570 if (thread == NULL)
1571 {
1572 return 0;
1573 }
1574
1575 oldlevel = disable_irq_save();
1576
1577 /* Munge the stack to make it easy to spot stack overflows */
1578 stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
1579 stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
1580 stack_size = stackend - stackptr;
1581 stack_words = stack_size / sizeof (uintptr_t);
1582
1583 for (i = 0; i < stack_words; i++)
1584 {
1585 ((uintptr_t *)stackptr)[i] = DEADBEEF;
1586 }
1587
1588 /* Store interesting information */
1589 thread->name = name;
1590 thread->stack = (uintptr_t *)stackptr;
1591 thread->stack_size = stack_size;
1592 thread->queue = NULL;
1593#ifdef HAVE_WAKEUP_EXT_CB
1594 thread->wakeup_ext_cb = NULL;
1595#endif
1596#ifdef HAVE_SCHEDULER_BOOSTCTRL
1597 thread->cpu_boost = 0;
1598#endif
1599#ifdef HAVE_PRIORITY_SCHEDULING
1600 memset(&thread->pdist, 0, sizeof(thread->pdist));
1601 thread->blocker = NULL;
1602 thread->base_priority = priority;
1603 thread->priority = priority;
1604 thread->skip_count = priority;
1605 prio_add_entry(&thread->pdist, priority);
1606#endif
1607
1608#ifdef HAVE_IO_PRIORITY
1609 /* Default to high (foreground) priority */
1610 thread->io_priority = IO_PRIORITY_IMMEDIATE;
1611#endif
1612
1613#if NUM_CORES > 1
1614 thread->core = core;
1615
1616 /* Writeback stack munging or anything else before starting */
1617 if (core != CURRENT_CORE)
1618 {
1619 commit_dcache();
1620 }
1621#endif
1622
1623 /* Thread is not on any timeout list but be a bit paranoid */
1624 thread->tmo.prev = NULL;
1625
1626 state = (flags & CREATE_THREAD_FROZEN) ?
1627 STATE_FROZEN : STATE_RUNNING;
1628
1629 thread->context.sp = (typeof (thread->context.sp))stackend;
1630
1631 /* Load the thread's context structure with needed startup information */
1632 THREAD_STARTUP_INIT(core, thread, function);
1633
1634 thread->state = state;
1635 i = thread->id; /* Snapshot while locked */
1636
1637 if (state == STATE_RUNNING)
1638 core_schedule_wakeup(thread);
1639
1640 UNLOCK_THREAD(thread);
1641 restore_irq(oldlevel);
1642
1643 return i;
1644}
1645
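As a usage sketch (not part of the patch): a caller supplies a statically allocated stack and either lets the thread run at once or creates it frozen and releases it later with thread_thaw(), defined further down. DEFAULT_STACK_SIZE and PRIORITY_BACKGROUND are assumed from the wider code base; the other names are invented.

    static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

    static void demo_thread(void)
    {
        for (;;)
            sleep(HZ);            /* hypothetical periodic work */
    }

    static void start_demo(void)
    {
        /* Created frozen so it cannot run before the rest of init is done */
        unsigned int id = create_thread(demo_thread,
                                        demo_stack, sizeof(demo_stack),
                                        CREATE_THREAD_FROZEN, "demo"
                                        IF_PRIO(, PRIORITY_BACKGROUND)
                                        IF_COP(, CPU));
        if (id == 0)
            panicf("demo thread creation failed");
        else
            thread_thaw(id);      /* put it on the run queue */
    }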
1646#ifdef HAVE_SCHEDULER_BOOSTCTRL
1647/*---------------------------------------------------------------------------
1648 * Change the boost state of a thread boosting or unboosting the CPU
1649 * as required.
1650 *---------------------------------------------------------------------------
1651 */
1652static inline void boost_thread(struct thread_entry *thread, bool boost)
1653{
1654 if ((thread->cpu_boost != 0) != boost)
1655 {
1656 thread->cpu_boost = boost;
1657 cpu_boost(boost);
1658 }
1659}
1660
1661void trigger_cpu_boost(void)
1662{
1663 struct thread_entry *current = cores[CURRENT_CORE].running;
1664 boost_thread(current, true);
1665}
1666
1667void cancel_cpu_boost(void)
1668{
1669 struct thread_entry *current = cores[CURRENT_CORE].running;
1670 boost_thread(current, false);
1671}
1672#endif /* HAVE_SCHEDULER_BOOSTCTRL */
1673
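boost_thread() only toggles when the per-thread flag actually changes, so a thread holds at most one boost no matter how often it calls trigger_cpu_boost(). A hypothetical burst-work pattern, with do_heavy_work() invented:

    /* Only meaningful on targets built with HAVE_SCHEDULER_BOOSTCTRL. */
    static void crunch_buffer(void)
    {
        trigger_cpu_boost();      /* this thread now holds its one boost */
        do_heavy_work();          /* hypothetical CPU-bound helper */
        cancel_cpu_boost();       /* release it so the CPU may unboost */
    }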
1674/*---------------------------------------------------------------------------
1675 * Block the current thread until another thread terminates. A thread may
1676 * wait on itself to terminate, which prevents it from running again, and it
1677 * will need to be killed externally.
1678 * Parameter is the ID as returned from create_thread().
1679 *---------------------------------------------------------------------------
1680 */
1681void thread_wait(unsigned int thread_id)
1682{
1683 struct thread_entry *current = cores[CURRENT_CORE].running;
1684 struct thread_entry *thread = thread_id_entry(thread_id);
1685
1686 /* Lock thread-as-waitable-object lock */
1687 corelock_lock(&thread->waiter_cl);
1688
1689 /* Be sure it hasn't been killed yet */
1690 if (thread->id == thread_id && thread->state != STATE_KILLED)
1691 {
1692 IF_COP( current->obj_cl = &thread->waiter_cl; )
1693 current->bqp = &thread->queue;
1694
1695 disable_irq();
1696 block_thread(current);
1697
1698 corelock_unlock(&thread->waiter_cl);
1699
1700 switch_thread();
1701 return;
1702 }
1703
1704 corelock_unlock(&thread->waiter_cl);
1705}
1706
1707/*---------------------------------------------------------------------------
1708 * Exit the current thread. The Right Way to Do Things (TM).
1709 *---------------------------------------------------------------------------
1710 */
1711/* This is done to foil optimizations that may require the current stack,
1712 * such as optimizing subexpressions that put variables on the stack that
1713 * get used after switching stacks. */
1714#if NUM_CORES > 1
1715/* Called by ASM stub */
1716static void thread_final_exit_do(struct thread_entry *current)
1717#else
1718/* No special procedure is required before calling */
1719static inline void thread_final_exit(struct thread_entry *current)
1720#endif
1721{
1722 /* At this point, this thread isn't using resources allocated for
1723 * execution except the slot itself. */
1724
1725 /* Signal this thread */
1726 thread_queue_wake(&current->queue);
1727 corelock_unlock(&current->waiter_cl);
1728 switch_thread();
1729 /* This should never and must never be reached - if it is, the
1730 * state is corrupted */
1731 THREAD_PANICF("thread_exit->K:*R", current);
1732 while (1);
1733}
1734
1735void thread_exit(void)
1736{
1737 register struct thread_entry * current = cores[CURRENT_CORE].running;
1738
1739 /* Cancel CPU boost if any */
1740 cancel_cpu_boost();
1741
1742 disable_irq();
1743
1744 corelock_lock(&current->waiter_cl);
1745 LOCK_THREAD(current);
1746
1747#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
1748 if (current->name == THREAD_DESTRUCT)
1749 {
1750 /* Thread being killed - become a waiter */
1751 unsigned int id = current->id;
1752 UNLOCK_THREAD(current);
1753 corelock_unlock(&current->waiter_cl);
1754 thread_wait(id);
1755 THREAD_PANICF("thread_exit->WK:*R", current);
1756 }
1757#endif
1758
1759#ifdef HAVE_PRIORITY_SCHEDULING
1760 check_for_obj_waiters("thread_exit", current);
1761#endif
1762
1763 if (current->tmo.prev != NULL)
1764 {
1765 /* Cancel pending timeout list removal */
1766 remove_from_list_tmo(current);
1767 }
1768
1769 /* Switch tasks and never return */
1770 block_thread_on_l(current, STATE_KILLED);
1771
1772 /* Slot must be unusable until thread is really gone */
1773 UNLOCK_THREAD_AT_TASK_SWITCH(current);
1774
1775 /* Update ID for this slot */
1776 new_thread_id(current->id, current);
1777 current->name = NULL;
1778
1779 /* Do final cleanup and remove the thread */
1780 thread_final_exit(current);
1781}
1782
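For a sense of how thread_exit() pairs with thread_wait(): the worker runs to completion and exits, which wakes everything queued on its slot before the slot is recycled. The sketch below is illustrative only; the worker, its stack size and do_one_shot_job() are invented and error handling is minimal.

    static long worker_stack[0x400];          /* made-up 4 KB stack */

    static void worker(void)
    {
        do_one_shot_job();                    /* hypothetical work */
        thread_exit();                        /* never returns */
    }

    static void run_and_join(void)
    {
        unsigned int id = create_thread(worker, worker_stack,
                                        sizeof(worker_stack), 0, "worker"
                                        IF_PRIO(, PRIORITY_BACKGROUND)
                                        IF_COP(, CPU));
        if (id != 0)
            thread_wait(id);                  /* block until the worker is gone */
    }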
1783#ifdef ALLOW_REMOVE_THREAD
1784/*---------------------------------------------------------------------------
1785 * Remove a thread from the scheduler. Not The Right Way to Do Things in
1786 * normal programs.
1787 *
1788 * Parameter is the ID as returned from create_thread().
1789 *
1790 * Use with care on threads that are not under careful control as this may
1791 * leave various objects in an undefined state.
1792 *---------------------------------------------------------------------------
1793 */
1794void remove_thread(unsigned int thread_id)
1795{
1796#ifdef HAVE_CORELOCK_OBJECT
1797 /* core is not constant here because of core switching */
1798 unsigned int core = CURRENT_CORE;
1799 unsigned int old_core = NUM_CORES;
1800 struct corelock *ocl = NULL;
1801#else
1802 const unsigned int core = CURRENT_CORE;
1803#endif
1804 struct thread_entry *current = cores[core].running;
1805 struct thread_entry *thread = thread_id_entry(thread_id);
1806
1807 unsigned state;
1808 int oldlevel;
1809
1810 if (thread == current)
1811 thread_exit(); /* Current thread - do normal exit */
1812
1813 oldlevel = disable_irq_save();
1814
1815 corelock_lock(&thread->waiter_cl);
1816 LOCK_THREAD(thread);
1817
1818 state = thread->state;
1819
1820 if (thread->id != thread_id || state == STATE_KILLED)
1821 goto thread_killed;
1822
1823#if NUM_CORES > 1
1824 if (thread->name == THREAD_DESTRUCT)
1825 {
1826 /* Thread being killed - become a waiter */
1827 UNLOCK_THREAD(thread);
1828 corelock_unlock(&thread->waiter_cl);
1829 restore_irq(oldlevel);
1830 thread_wait(thread_id);
1831 return;
1832 }
1833
1834 thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
1835
1836#ifdef HAVE_PRIORITY_SCHEDULING
1837 check_for_obj_waiters("remove_thread", thread);
1838#endif
1839
1840 if (thread->core != core)
1841 {
1842 /* Switch cores and safely extract the thread there */
1843 /* Slot HAS to be unlocked or a deadlock could occur which means other
1844 * threads have to be guided into becoming thread waiters if they
1845 * attempt to remove it. */
1846 unsigned int new_core = thread->core;
1847
1848 corelock_unlock(&thread->waiter_cl);
1849
1850 UNLOCK_THREAD(thread);
1851 restore_irq(oldlevel);
1852
1853 old_core = switch_core(new_core);
1854
1855 oldlevel = disable_irq_save();
1856
1857 corelock_lock(&thread->waiter_cl);
1858 LOCK_THREAD(thread);
1859
1860 state = thread->state;
1861 core = new_core;
1862 /* Perform the extraction and switch ourselves back to the original
1863 processor */
1864 }
1865#endif /* NUM_CORES > 1 */
1866
1867 if (thread->tmo.prev != NULL)
1868 {
1869 /* Clean thread off the timeout list if a timeout check hasn't
1870 * run yet */
1871 remove_from_list_tmo(thread);
1872 }
1873
1874#ifdef HAVE_SCHEDULER_BOOSTCTRL
1875 /* Cancel CPU boost if any */
1876 boost_thread(thread, false);
1877#endif
1878
1879IF_COP( retry_state: )
1880
1881 switch (state)
1882 {
1883 case STATE_RUNNING:
1884 RTR_LOCK(core);
1885 /* Remove thread from ready to run tasks */
1886 remove_from_list_l(&cores[core].running, thread);
1887 rtr_subtract_entry(core, thread->priority);
1888 RTR_UNLOCK(core);
1889 break;
1890 case STATE_BLOCKED:
1891 case STATE_BLOCKED_W_TMO:
1892 /* Remove thread from the queue it's blocked on - including its
1893 * own if waiting there */
1894#if NUM_CORES > 1
1895 if (&thread->waiter_cl != thread->obj_cl)
1896 {
1897 ocl = thread->obj_cl;
1898
1899 if (UNLIKELY(corelock_try_lock(ocl) == 0))
1900 {
1901 UNLOCK_THREAD(thread);
1902 corelock_lock(ocl);
1903 LOCK_THREAD(thread);
1904
1905 if (UNLIKELY(thread->state != state))
1906 {
1907 /* Something woke the thread */
1908 state = thread->state;
1909 corelock_unlock(ocl);
1910 goto retry_state;
1911 }
1912 }
1913 }
1914#endif
1915 remove_from_list_l(thread->bqp, thread);
1916
1917#ifdef HAVE_WAKEUP_EXT_CB
1918 if (thread->wakeup_ext_cb != NULL)
1919 thread->wakeup_ext_cb(thread);
1920#endif
1921
1922#ifdef HAVE_PRIORITY_SCHEDULING
1923 if (thread->blocker != NULL)
1924 {
1925 /* Remove thread's priority influence from its chain */
1926 wakeup_priority_protocol_release(thread);
1927 }
1928#endif
1929
1930#if NUM_CORES > 1
1931 if (ocl != NULL)
1932 corelock_unlock(ocl);
1933#endif
1934 break;
1935 /* Otherwise thread is frozen and hasn't run yet */
1936 }
1937
1938 new_thread_id(thread_id, thread);
1939 thread->state = STATE_KILLED;
1940
1941 /* If thread was waiting on itself, it will have been removed above.
1942 * The wrong order would result in waking the thread first and deadlocking
1943 * since the slot is already locked. */
1944 thread_queue_wake(&thread->queue);
1945
1946 thread->name = NULL;
1947
1948thread_killed: /* Thread was already killed */
1949 /* Removal complete - safe to unlock and reenable interrupts */
1950 corelock_unlock(&thread->waiter_cl);
1951 UNLOCK_THREAD(thread);
1952 restore_irq(oldlevel);
1953
1954#if NUM_CORES > 1
1955 if (old_core < NUM_CORES)
1956 {
1957 /* Did a removal on another processor's thread - switch back to
1958 native core */
1959 switch_core(old_core);
1960 }
1961#endif
1962}
1963#endif /* ALLOW_REMOVE_THREAD */
1964
1965#ifdef HAVE_PRIORITY_SCHEDULING
1966/*---------------------------------------------------------------------------
1967 * Sets the thread's relative base priority for the core it runs on. Any
1968 * needed inheritance changes also may happen.
1969 *---------------------------------------------------------------------------
1970 */
1971int thread_set_priority(unsigned int thread_id, int priority)
1972{
1973 int old_base_priority = -1;
1974 struct thread_entry *thread = thread_id_entry(thread_id);
1975
1976 /* A little safety measure */
1977 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
1978 return -1;
1979
1980 /* Thread could be on any list and therefore on an interrupt accessible
1981 one - disable interrupts */
1982 int oldlevel = disable_irq_save();
1983
1984 LOCK_THREAD(thread);
1985
1986 /* Make sure it's not killed */
1987 if (thread->id == thread_id && thread->state != STATE_KILLED)
1988 {
1989 int old_priority = thread->priority;
1990
1991 old_base_priority = thread->base_priority;
1992 thread->base_priority = priority;
1993
1994 prio_move_entry(&thread->pdist, old_base_priority, priority);
1995 priority = find_first_set_bit(thread->pdist.mask);
1996
1997 if (old_priority == priority)
1998 {
1999 /* No priority change - do nothing */
2000 }
2001 else if (thread->state == STATE_RUNNING)
2002 {
2003 /* This thread is running - change location on the run
2004 * queue. No transitive inheritance needed. */
2005 set_running_thread_priority(thread, priority);
2006 }
2007 else
2008 {
2009 thread->priority = priority;
2010
2011 if (thread->blocker != NULL)
2012 {
2013 /* Bubble new priority down the chain */
2014 struct blocker *bl = thread->blocker; /* Blocker struct */
2015 struct thread_entry *bl_t = bl->thread; /* Blocking thread */
2016 struct thread_entry * const tstart = thread; /* Initial thread */
2017 const int highest = MIN(priority, old_priority); /* Higher of new or old */
2018
2019 for (;;)
2020 {
2021 struct thread_entry *next; /* Next thread to check */
2022 int bl_pr; /* Highest blocked thread */
2023 int queue_pr; /* New highest blocked thread */
2024#if NUM_CORES > 1
2025 /* Owner can change but thread cannot be dislodged - thread
2026 * may not be the first in the queue which allows other
2027 * threads ahead in the list to be given ownership during the
2028 * operation. If thread is next then the waker will have to
2029 * wait for us and the owner of the object will remain fixed.
2030 * If we successfully grab the owner -- which at some point
2031 * is guaranteed -- then the queue remains fixed until we
2032 * pass by. */
2033 for (;;)
2034 {
2035 LOCK_THREAD(bl_t);
2036
2037 /* Double-check the owner - retry if it changed */
2038 if (LIKELY(bl->thread == bl_t))
2039 break;
2040
2041 UNLOCK_THREAD(bl_t);
2042 bl_t = bl->thread;
2043 }
2044#endif
2045 bl_pr = bl->priority;
2046
2047 if (highest > bl_pr)
2048 break; /* Object priority won't change */
2049
2050 /* This will include the thread being set */
2051 queue_pr = find_highest_priority_in_list_l(*thread->bqp);
2052
2053 if (queue_pr == bl_pr)
2054 break; /* Object priority not changing */
2055
2056 /* Update thread boost for this object */
2057 bl->priority = queue_pr;
2058 prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
2059 bl_pr = find_first_set_bit(bl_t->pdist.mask);
2060
2061 if (bl_t->priority == bl_pr)
2062 break; /* Blocking thread priority not changing */
2063
2064 if (bl_t->state == STATE_RUNNING)
2065 {
2066 /* Thread not blocked - we're done */
2067 set_running_thread_priority(bl_t, bl_pr);
2068 break;
2069 }
2070
2071 bl_t->priority = bl_pr;
2072 bl = bl_t->blocker; /* Blocking thread has a blocker? */
2073
2074 if (bl == NULL)
2075 break; /* End of chain */
2076
2077 next = bl->thread;
2078
2079 if (UNLIKELY(next == tstart))
2080 break; /* Full-circle */
2081
2082 UNLOCK_THREAD(thread);
2083
2084 thread = bl_t;
2085 bl_t = next;
2086 } /* for (;;) */
2087
2088 UNLOCK_THREAD(bl_t);
2089 }
2090 }
2091 }
2092
2093 UNLOCK_THREAD(thread);
2094
2095 restore_irq(oldlevel);
2096
2097 return old_base_priority;
2098}
2099
2100/*---------------------------------------------------------------------------
2101 * Returns the current base priority for a thread.
2102 *---------------------------------------------------------------------------
2103 */
2104int thread_get_priority(unsigned int thread_id)
2105{
2106 struct thread_entry *thread = thread_id_entry(thread_id);
2107 int base_priority = thread->base_priority;
2108
2109 /* Simply check without locking slot. It may or may not be valid by the
2110 * time the function returns anyway. If all tests pass, it is the
2111 * correct value for when it was valid. */
2112 if (thread->id != thread_id || thread->state == STATE_KILLED)
2113 base_priority = -1;
2114
2115 return base_priority;
2116}
2117#endif /* HAVE_PRIORITY_SCHEDULING */
2118
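Both calls take the thread ID rather than a slot pointer, so they work with IDs cached from create_thread(). A hypothetical example that raises a thread's base priority around a latency-sensitive phase and then restores the value the setter returned (do_latency_sensitive_work() is invented):

    static void bump_for_a_while(unsigned int tid)
    {
        int old = thread_set_priority(tid, PRIORITY_REALTIME);
        if (old < 0)
            return;                           /* bad priority or dead thread */

        do_latency_sensitive_work();          /* hypothetical */

        thread_set_priority(tid, old);        /* restore the previous base */
    }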
2119#ifdef HAVE_IO_PRIORITY
2120int thread_get_io_priority(unsigned int thread_id)
2121{
2122 struct thread_entry *thread = thread_id_entry(thread_id);
2123 return thread->io_priority;
2124}
2125
2126void thread_set_io_priority(unsigned int thread_id,int io_priority)
2127{
2128 struct thread_entry *thread = thread_id_entry(thread_id);
2129 thread->io_priority = io_priority;
2130}
2131#endif
2132
2133/*---------------------------------------------------------------------------
2134 * Starts a frozen thread - similar semantics to wakeup_thread except that
2135 * the thread is on no scheduler or wakeup queue at all. It exists simply by
2136 * virtue of the slot having a state of STATE_FROZEN.
2137 *---------------------------------------------------------------------------
2138 */
2139void thread_thaw(unsigned int thread_id)
2140{
2141 struct thread_entry *thread = thread_id_entry(thread_id);
2142 int oldlevel = disable_irq_save();
2143
2144 LOCK_THREAD(thread);
2145
2146 /* If thread is the current one, it cannot be frozen, therefore
2147 * there is no need to check that. */
2148 if (thread->id == thread_id && thread->state == STATE_FROZEN)
2149 core_schedule_wakeup(thread);
2150
2151 UNLOCK_THREAD(thread);
2152 restore_irq(oldlevel);
2153}
2154
2155#if NUM_CORES > 1
2156/*---------------------------------------------------------------------------
2157 * Switch the processor that the currently executing thread runs on.
2158 *---------------------------------------------------------------------------
2159 */
2160unsigned int switch_core(unsigned int new_core)
2161{
2162 const unsigned int core = CURRENT_CORE;
2163 struct thread_entry *current = cores[core].running;
2164
2165 if (core == new_core)
2166 {
2167 /* No change - just return same core */
2168 return core;
2169 }
2170
2171 int oldlevel = disable_irq_save();
2172 LOCK_THREAD(current);
2173
2174 if (current->name == THREAD_DESTRUCT)
2175 {
2176 /* Thread being killed - deactivate and let process complete */
2177 unsigned int id = current->id;
2178 UNLOCK_THREAD(current);
2179 restore_irq(oldlevel);
2180 thread_wait(id);
2181 /* Should never be reached */
2182 THREAD_PANICF("switch_core->D:*R", current);
2183 }
2184
2185 /* Get us off the running list for the current core */
2186 RTR_LOCK(core);
2187 remove_from_list_l(&cores[core].running, current);
2188 rtr_subtract_entry(core, current->priority);
2189 RTR_UNLOCK(core);
2190
2191 /* Stash return value (old core) in a safe place */
2192 current->retval = core;
2193
2194 /* If a timeout hadn't yet been cleaned-up it must be removed now or
2195 * the other core will likely attempt a removal from the wrong list! */
2196 if (current->tmo.prev != NULL)
2197 {
2198 remove_from_list_tmo(current);
2199 }
2200
2201 /* Change the core number for this thread slot */
2202 current->core = new_core;
2203
2204 /* Do not use core_schedule_wakeup here since this will result in
2205 * the thread starting to run on the other core before being finished on
2206 * this one. Delay the list unlock to keep the other core stuck
2207 * until this thread is ready. */
2208 RTR_LOCK(new_core);
2209
2210 rtr_add_entry(new_core, current->priority);
2211 add_to_list_l(&cores[new_core].running, current);
2212
2213 /* Make a callback into device-specific code, unlock the wakeup list so
2214 * that execution may resume on the new core, unlock our slot and finally
2215 * restore the interrupt level */
2216 cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
2217 cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
2218 cores[core].block_task = current;
2219
2220 UNLOCK_THREAD(current);
2221
2222 /* Alert other core to activity */
2223 core_wake(new_core);
2224
2225 /* Do the stack switching, cache maintenance and switch_thread call -
2226 requires native code */
2227 switch_thread_core(core, current);
2228
2229 /* Finally return the old core to caller */
2230 return current->retval;
2231}
2232#endif /* NUM_CORES > 1 */
2233
2234/*---------------------------------------------------------------------------
2235 * Initialize threading API. This assumes interrupts are not yet enabled. On
2236 * multicore setups, no core is allowed to proceed until create_thread calls
2237 * are safe to perform.
2238 *---------------------------------------------------------------------------
2239 */
2240void init_threads(void)
2241{
2242 const unsigned int core = CURRENT_CORE;
2243 struct thread_entry *thread;
2244
2245 if (core == CPU)
2246 {
2247 /* Initialize core locks and IDs in all slots */
2248 int n;
2249 for (n = 0; n < MAXTHREADS; n++)
2250 {
2251 thread = &threads[n];
2252 corelock_init(&thread->waiter_cl);
2253 corelock_init(&thread->slot_cl);
2254 thread->id = THREAD_ID_INIT(n);
2255 }
2256 }
2257
2258 /* CPU will initialize first and then sleep */
2259 thread = find_empty_thread_slot();
2260
2261 if (thread == NULL)
2262 {
2263 /* WTF? There really must be a slot available at this stage.
2264 * This can fail if, for example, .bss isn't zero'ed out by the loader
2265 * or the threads array is placed in the wrong section. */
2266 THREAD_PANICF("init_threads->no slot", NULL);
2267 }
2268
2269 /* Initialize initially non-zero members of core */
2270 cores[core].next_tmo_check = current_tick; /* Something not in the past */
2271
2272 /* Initialize initially non-zero members of slot */
2273 UNLOCK_THREAD(thread); /* No sync worries yet */
2274 thread->name = main_thread_name;
2275 thread->state = STATE_RUNNING;
2276 IF_COP( thread->core = core; )
2277#ifdef HAVE_PRIORITY_SCHEDULING
2278 corelock_init(&cores[core].rtr_cl);
2279 thread->base_priority = PRIORITY_USER_INTERFACE;
2280 prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
2281 thread->priority = PRIORITY_USER_INTERFACE;
2282 rtr_add_entry(core, PRIORITY_USER_INTERFACE);
2283#endif
2284
2285 add_to_list_l(&cores[core].running, thread);
2286
2287 if (core == CPU)
2288 {
2289 thread->stack = stackbegin;
2290 thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
2291#if NUM_CORES > 1 /* This code path will not be run on single core targets */
2292 /* Wait for other processors to finish their inits since create_thread
2293 * isn't safe to call until the kernel inits are done. The first
2294 * threads created in the system must of course be created by CPU.
2295 * Another possible approach is to initialize all cores and slots
2296 * for each core by CPU, let the remainder proceed in parallel and
2297 * signal CPU when all are finished. */
2298 core_thread_init(CPU);
2299 }
2300 else
2301 {
2302 /* Initial stack is the idle stack */
2303 thread->stack = idle_stacks[core];
2304 thread->stack_size = IDLE_STACK_SIZE;
2305 /* After last processor completes, it should signal all others to
2306 * proceed or may signal the next and call thread_exit(). The last one
2307 * to finish will signal CPU. */
2308 core_thread_init(core);
2309 /* Other cores do not have a main thread - go idle inside switch_thread
2310 * until a thread can run on the core. */
2311 thread_exit();
2312#endif /* NUM_CORES */
2313 }
2314#ifdef INIT_MAIN_THREAD
2315 init_main_thread(&thread->context);
2316#endif
2317}
2318
2319/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
2320#if NUM_CORES == 1
2321static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
2322#else
2323static int stack_usage(uintptr_t *stackptr, size_t stack_size)
2324#endif
2325{
2326 unsigned int stack_words = stack_size / sizeof (uintptr_t);
2327 unsigned int i;
2328 int usage = 0;
2329
2330 for (i = 0; i < stack_words; i++)
2331 {
2332 if (stackptr[i] != DEADBEEF)
2333 {
2334 usage = ((stack_words - i) * 100) / stack_words;
2335 break;
2336 }
2337 }
2338
2339 return usage;
2340}
2341
2342/*---------------------------------------------------------------------------
2343 * Returns the maximum percentage of stack a thread ever used while running.
2344 * NOTE: Some large buffer allocations that don't use enough of the buffer to
2345 * overwrite stackptr[0] will not be seen.
2346 *---------------------------------------------------------------------------
2347 */
2348int thread_stack_usage(const struct thread_entry *thread)
2349{
2350 if (LIKELY(thread->stack_size > 0))
2351 return stack_usage(thread->stack, thread->stack_size);
2352 return 0;
2353}
2354
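A typical consumer walks the slot table and reports the watermark for each live thread, much as the debug screens do. A sketch, assuming the firmware's DEBUGF macro and using thread_get_name() from further down in this file:

    static void dump_stack_usage(void)
    {
        char name[32];

        for (int i = 0; i < MAXTHREADS; i++)
        {
            struct thread_entry *t = &threads[i];

            if (t->state == STATE_KILLED)
                continue;                      /* empty slot */

            thread_get_name(name, sizeof(name), t);
            DEBUGF("%s: %d%% stack used\n", name, thread_stack_usage(t));
        }
    }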
2355#if NUM_CORES > 1
2356/*---------------------------------------------------------------------------
2357 * Returns the maximum percentage of the core's idle stack ever used during
2358 * runtime.
2359 *---------------------------------------------------------------------------
2360 */
2361int idle_stack_usage(unsigned int core)
2362{
2363 return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
2364}
2365#endif
2366
2367/*---------------------------------------------------------------------------
2368 * Fills in the buffer with the specified thread's name. If the name is NULL,
2369 * empty, or the thread is in destruct state, a formatted ID is written
2370 * instead.
2371 *---------------------------------------------------------------------------
2372 */
2373void thread_get_name(char *buffer, int size,
2374 struct thread_entry *thread)
2375{
2376 if (size <= 0)
2377 return;
2378
2379 *buffer = '\0';
2380
2381 if (thread)
2382 {
2383 /* Display thread name if one or ID if none */
2384 const char *name = thread->name;
2385 const char *fmt = "%s";
2386 if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
2387 {
2388 name = (const char *)(uintptr_t)thread->id;
2389 fmt = "%04lX";
2390 }
2391 snprintf(buffer, size, fmt, name);
2392 }
2393}
2394
2395/* Unless otherwise defined, do nothing */
2396#ifndef YIELD_KERNEL_HOOK
2397#define YIELD_KERNEL_HOOK() false
2398#endif
2399#ifndef SLEEP_KERNEL_HOOK
2400#define SLEEP_KERNEL_HOOK(ticks) false
2401#endif
2402
2403/*---------------------------------------------------------------------------
2404 * Suspends a thread's execution for at least the specified number of ticks.
2405 *
2406 * May result in CPU core entering wait-for-interrupt mode if no other thread
2407 * may be scheduled.
2408 *
2409 * NOTE: sleep(0) sleeps until the end of the current tick
2410 * sleep(n) that doesn't result in rescheduling:
2411 * n <= ticks suspended < n + 1
2412 * The n to n+1 range is a lower bound; other factors may extend the time
2413 * a thread is suspended before it runs again.
2414 *---------------------------------------------------------------------------
2415 */
2416unsigned sleep(unsigned ticks)
2417{
2418 /* In certain situations, certain bootloaders in particular, a normal
2419 * threading call is inappropriate. */
2420 if (SLEEP_KERNEL_HOOK(ticks))
2421 return 0; /* Handled */
2422
2423 disable_irq();
2424 sleep_thread(ticks);
2425 switch_thread();
2426 return 0;
2427}
2428
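Concretely, with the usual HZ-rate tick this means sleep(HZ) gives up the CPU for at least one second, while sleep(0) merely waits out the remainder of the current tick. A trivial sketch (toggle_led() is invented):

    static void blink_forever(void)
    {
        for (;;)
        {
            toggle_led();     /* hypothetical target helper */
            sleep(HZ);        /* at least 1 s; may be longer under load */
        }
    }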
2429/*---------------------------------------------------------------------------
2430 * Elects another thread to run or, if no other thread may be made ready to
2431 * run, immediately returns control back to the calling thread.
2432 *---------------------------------------------------------------------------
2433 */
2434void yield(void)
2435{
2436 /* In certain situations, certain bootloaders in particular, a normal
2437 * threading call is inappropriate. */
2438 if (YIELD_KERNEL_HOOK())
2439 return; /* handled */
2440
2441 switch_thread();
2442}
diff --git a/firmware/kernel/tick.c b/firmware/kernel/tick.c
new file mode 100644
index 0000000000..c524560687
--- /dev/null
+++ b/firmware/kernel/tick.c
@@ -0,0 +1,74 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Björn Stenberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#include "config.h"
23#include "tick.h"
24#include "general.h"
25#include "panic.h"
26
27/****************************************************************************
28 * Timer tick
29 *****************************************************************************/
30
31
32/* List of tick tasks - final element always NULL for termination */
33void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
34
35#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
36 defined(HAVE_BOOTLOADER_USB_MODE)
37volatile long current_tick SHAREDDATA_ATTR = 0;
38#endif
39
40/* - Timer initialization and interrupt handler is defined at
41 * the target level: tick_start() is implemented in the target tree */
42
43int tick_add_task(void (*f)(void))
44{
45 int oldlevel = disable_irq_save();
46 void **arr = (void **)tick_funcs;
47 void **p = find_array_ptr(arr, f);
48
49 /* Add a task if there is room */
50 if(p - arr < MAX_NUM_TICK_TASKS)
51 {
52 *p = f; /* If already in list, no problem. */
53 }
54 else
55 {
56 panicf("Error! tick_add_task(): out of tasks");
57 }
58
59 restore_irq(oldlevel);
60 return 0;
61}
62
63int tick_remove_task(void (*f)(void))
64{
65 int oldlevel = disable_irq_save();
66 int rc = remove_array_ptr((void **)tick_funcs, f);
67 restore_irq(oldlevel);
68 return rc;
69}
70
71void init_tick(void)
72{
73 tick_start(1000/HZ);
74}
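A sketch of registering a tick task with the list above. Tick tasks are called from the timer interrupt once per tick, so they must be short and must never block; the names below are invented.

    static volatile long seconds_up;

    static void count_seconds(void)
    {
        /* current_tick advances HZ times per second */
        if ((current_tick % HZ) == 0)
            seconds_up++;
    }

    static void housekeeping_init(void)
    {
        tick_add_task(count_seconds);   /* panics if the task list is full */
    }

    /* tick_remove_task(count_seconds) detaches it again when done. */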
diff --git a/firmware/kernel/timeout.c b/firmware/kernel/timeout.c
new file mode 100644
index 0000000000..8039e56ffb
--- /dev/null
+++ b/firmware/kernel/timeout.c
@@ -0,0 +1,97 @@
1
2/****************************************************************************
3 * Tick-based interval timers/one-shots - be mindful this is not really
4 * intended for continuous timers but for events that need to run for a short
5 * time and be cancelled without further software intervention.
6 ****************************************************************************/
7
8#include "config.h"
9#include "system.h" /* TIME_AFTER */
10#include "kernel.h"
11#include "timeout.h"
12#include "general.h"
13
14/* list of active timeout events */
15static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
16
17/* timeout tick task - calls event handlers when they expire
18 * Event handlers may alter expiration, callback and data during operation.
19 */
20static void timeout_tick(void)
21{
22 unsigned long tick = current_tick;
23 struct timeout **p = tmo_list;
24 struct timeout *curr;
25
26 for(curr = *p; curr != NULL; curr = *(++p))
27 {
28 int ticks;
29
30 if(TIME_BEFORE(tick, curr->expires))
31 continue;
32
33 /* this event has expired - call callback */
34 ticks = curr->callback(curr);
35 if(ticks > 0)
36 {
37 curr->expires = tick + ticks; /* reload */
38 }
39 else
40 {
41 timeout_cancel(curr); /* cancel */
42 }
43 }
44}
45
46/* Cancels a timeout callback - can be called from the ISR */
47void timeout_cancel(struct timeout *tmo)
48{
49 int oldlevel = disable_irq_save();
50 int rc = remove_array_ptr((void **)tmo_list, tmo);
51
52 if(rc >= 0 && *tmo_list == NULL)
53 {
54 tick_remove_task(timeout_tick); /* Last one - remove task */
55 }
56
57 restore_irq(oldlevel);
58}
59
60/* Adds a timeout callback - calling with an active timeout resets the
61 interval - can be called from the ISR */
62void timeout_register(struct timeout *tmo, timeout_cb_type callback,
63 int ticks, intptr_t data)
64{
65 int oldlevel;
66 void **arr, **p;
67
68 if(tmo == NULL)
69 return;
70
71 oldlevel = disable_irq_save();
72
73 /* See if this one is already registered */
74 arr = (void **)tmo_list;
75 p = find_array_ptr(arr, tmo);
76
77 if(p - arr < MAX_NUM_TIMEOUTS)
78 {
79 /* Vacancy */
80 if(*p == NULL)
81 {
82 /* Not present */
83 if(*tmo_list == NULL)
84 {
85 tick_add_task(timeout_tick); /* First one - add task */
86 }
87
88 *p = tmo;
89 }
90
91 tmo->callback = callback;
92 tmo->data = data;
93 tmo->expires = current_tick + ticks;
94 }
95
96 restore_irq(oldlevel);
97}
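A sketch of a one-shot use of this API: re-registering an active timeout simply rearms it, and returning 0 from the callback lets timeout_tick() cancel it after it fires, while a positive return would reload it. handle_button() is invented and the 50 ms figure assumes HZ = 100.

    static struct timeout debounce_tmo;

    static int debounce_expired(struct timeout *tmo)
    {
        handle_button((int)tmo->data);  /* hypothetical consumer */
        return 0;                       /* one-shot: do not reload */
    }

    static void queue_debounce(int button)
    {
        /* (Re)arm for HZ/20 ticks (about 50 ms) from now */
        timeout_register(&debounce_tmo, debounce_expired, HZ/20, button);
    }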